diff --git a/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.up.sql b/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.up.sql index af6fb154e2..2a10456ab0 100644 --- a/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.up.sql +++ b/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.up.sql @@ -33,6 +33,7 @@ INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, ` INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (145,'tendbcluster','dbconf','MySQL-5.6','my.cnf配置','MySQL-5.6','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'5.6_参数配置','2022-04-25 10:19:22','2023-12-22 04:16:53',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (144,'tendbcluster','dbconf','MySQL-5.7','my.cnf配置','MySQL-5.7','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'5.7配置','2022-04-25 10:19:22','2023-06-29 10:33:14',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (143,'tendbcluster','dbconf','MySQL-8.0','','MySQL-8.0','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL8.0配置','2022-06-02 17:27:34','2023-06-29 10:32:50',''); +INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (362,'tendbcluster','dbconf','RocksDB-5.7','','RocksDB-5.7','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL-RocksDB-5.7','2022-06-02 17:27:34','2024-12-05 16:26:29',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (147,'tendbcluster','dbconf','Spider-1','my.cnf配置','Spider 1.x','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'Spider 1.x 接入层','2022-04-25 10:19:22','2023-06-29 10:33:02',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES 
(148,'tendbcluster','dbconf','Spider-3','my.cnf配置','Spider 3.x推荐版本','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'Spider 3.x 接入层','2022-04-25 10:19:22','2024-09-04 10:36:16',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (361,'tendbcluster','dbconf','Spider-3.5','my.cnf配置','Spider 3.5.x','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'Spider 3.x 接入层','2022-04-25 10:19:22','2023-06-29 10:33:05',''); @@ -40,6 +41,7 @@ INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, ` INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (353,'tendbcluster','dbconf','Spider-3.7','my.cnf配置','Spider 3.7.x','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'Spider 3.x 接入层','2022-04-25 10:19:22','2023-06-29 10:33:05',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (355,'tendbcluster','dbconf','Spider-3.8','my.cnf配置','Spider 3.8.x','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'Spider 3.x 接入层','2022-04-25 10:19:22','2023-06-29 10:33:05',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (177,'tendbcluster','dbconf','Tdbctl','my.cnf配置','tdbctl中控配置','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'tdbctl中控配置','2022-04-25 10:19:22','2023-05-10 19:35:47',''); +INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (364,'tendbcluster','dbconf','TokuDB-5.6','','TokuDB-5.6','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL-TokuDB-5.6','2022-06-02 17:27:34','2024-12-05 16:26:29',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (270,'tendbcluster','dbconf','TXSQL-8.0','','TXSQL-8.0','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL8.0-txsql配置','2022-06-02 17:27:34','2023-11-14 15:09:13',''); INSERT INTO `tb_config_file_def` 
(`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (192,'tendbcluster','deploy','deploy_info','部署配置',NULL,'plat,app,module,cluster','',0,1,0,NULL,5,365,0,NULL,'2023-03-09 17:40:06','2023-03-20 21:40:05',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (194,'tendbcluster','sys','sysfile','系统配置',NULL,'plat','',1,1,0,NULL,5,365,0,NULL,'2023-03-09 17:40:06','2023-03-20 21:40:05',''); @@ -2649,6 +2651,7 @@ INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, ` INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15802,'tendbcluster','dbconf','Tdbctl','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25565,'tendbcluster','dbconf','TokuDB-5.6','mysqld.default_storage_engine','STRING','Tokudb','Tokudb','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-12-27 07:14:28',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25574,'tendbcluster','dbconf','TokuDB-5.6','mysqld.innodb_buffer_pool_size','INT','200M','[100m, 64000m]','BYTES',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-11-15 12:15:56',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES 
(25855,'tendbcluster','dbconf','TokuDB-5.6','mysqld.plugin-load','STRING','TokuDB=ha_tokudb.so;TokuDB_trx=ha_tokudb.so;TokuDB_locks=ha_tokudb.so;TokuDB_lock_waits=ha_tokudb.so;TokuDB_file_map=ha_tokudb.so;TokuDB_fractal_tree_info=ha_tokudb.so;TokuDB_fractal_tree_block_map=ha_tokudb.so','','STRING',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-12-27 07:14:28',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25566,'tendbcluster','dbconf','TokuDB-5.6','mysqld.tokudb_cache_size','STRING','16000M','[100m,12800m]','BYTES',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-11-15 14:37:17',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25567,'tendbcluster','dbconf','TokuDB-5.6','mysqld.tokudb_commit_sync','STRING','0','0 | 1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-11-15 14:37:17',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25571,'tendbcluster','dbconf','TokuDB-5.6','mysqld.tokudb_data_dir','STRING','{{.Mysqld.Datadir}}/tokudb/data','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-11-15 14:39:00',0); diff --git a/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.up.sql b/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.up.sql index e2edc8d296..69befbab49 100644 --- a/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.up.sql +++ b/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.up.sql @@ -33,6 +33,8 @@ INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, ` INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (9,'tendbha','dbconf','MySQL-5.6','my.cnf配置','MySQL-5.6','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'5.6_参数配置','2022-04-25 10:19:22','2023-08-11 13:12:14',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES 
(10,'tendbha','dbconf','MySQL-5.7','my.cnf配置','MySQL-5.7','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'5.7_参数配置','2022-04-25 10:19:22','2023-06-29 10:34:49',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (40,'tendbha','dbconf','MySQL-8.0','','MySQL-8.0','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL8.0配置','2022-06-02 17:27:34','2023-08-11 13:12:18',''); +INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (363,'tendbha','dbconf','RocksDB-5.7','','RocksDB-5.7','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL-RocksDB-5.7','2022-06-02 17:27:34','2024-12-05 16:26:29',''); +INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (365,'tendbha','dbconf','TokuDB-5.6','','TokuDB-5.6','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL-TokuDB-5.6','2022-06-02 17:27:34','2024-12-05 16:26:29',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (269,'tendbha','dbconf','TXSQL-8.0','','TXSQL-8.0','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL8.0-txsql配置','2022-06-02 17:27:34','2023-11-14 15:09:06',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (6,'tendbha','dbha','dbha','DBHA切换配置',NULL,'plat,app,city,module,cluster','',1,1,0,NULL,5,365,0,NULL,'2022-04-25 10:19:22','2023-03-20 21:40:05',''); INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (4,'tendbha','deploy','deploy_info','部署配置',NULL,'plat,app,module,cluster','',0,1,0,NULL,5,365,0,NULL,'2022-04-25 10:19:22','2023-03-20 21:40:05',''); @@ -1030,6 +1032,17 @@ INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, ` INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, 
`flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25545,'tendbha','dbconf','RocksDB-5.7','mysqld.rocksdb_max_total_wal_size','INT','4G','[256m, 65536m]','BYTES',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-11-15 12:15:56',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25547,'tendbha','dbconf','RocksDB-5.7','mysqld.rocksdb_strict_collation_check','STRING','off','off | on','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-12-27 07:14:28',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25546,'tendbha','dbconf','RocksDB-5.7','mysqld.rocksdb_table_cache_numshardbits','INT','6','[1, 128]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-12-27 07:14:28',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25857,'tendbha','dbconf','TokuDB-5.6','mysqld.default_storage_engine','STRING','Tokudb','Tokudb','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25866,'tendbha','dbconf','TokuDB-5.6','mysqld.innodb_buffer_pool_size','INT','200M','[100m, 64000m]','BYTES',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25856,'tendbha','dbconf','TokuDB-5.6','mysqld.plugin-load','STRING','TokuDB=ha_tokudb.so;TokuDB_trx=ha_tokudb.so;TokuDB_locks=ha_tokudb.so;TokuDB_lock_waits=ha_tokudb.so;TokuDB_file_map=ha_tokudb.so;TokuDB_fractal_tree_info=ha_tokudb.so;TokuDB_fractal_tree_block_map=ha_tokudb.so','','STRING',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, 
`value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25858,'tendbha','dbconf','TokuDB-5.6','mysqld.tokudb_cache_size','STRING','16000M','[100m,12800m]','BYTES',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25859,'tendbha','dbconf','TokuDB-5.6','mysqld.tokudb_commit_sync','STRING','0','0 | 1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25863,'tendbha','dbconf','TokuDB-5.6','mysqld.tokudb_data_dir','STRING','{{.Mysqld.Datadir}}/tokudb/data','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25861,'tendbha','dbconf','TokuDB-5.6','mysqld.tokudb_fsync_log_period','INT','1000','[0, 4294967295]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25860,'tendbha','dbconf','TokuDB-5.6','mysqld.tokudb_fs_reserve_percent','INT','0','[0, 100]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25862,'tendbha','dbconf','TokuDB-5.6','mysqld.tokudb_lock_timeout','INT','50000','[0, 100000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, 
`value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25864,'tendbha','dbconf','TokuDB-5.6','mysqld.tokudb_log_dir','STRING','{{.Mysqld.Datadir}}/tokudb/log','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); +INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25865,'tendbha','dbconf','TokuDB-5.6','mysqld.tokudb_tmp_dir','STRING','{{.Mysqld.Datadir}}/tmp','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2024-12-05 16:33:38',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (18227,'tendbha','dbconf','TXSQL-8.0','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-11-14 15:18:34','2023-11-14 15:18:34',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (18228,'tendbha','dbconf','TXSQL-8.0','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-11-14 15:18:34','2023-11-14 15:18:34',0); INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (18229,'tendbha','dbconf','TXSQL-8.0','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'whether automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine','2023-11-14 15:18:34','2023-11-14 15:18:34',0); diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader/xtrabackup_recover.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader/xtrabackup_recover.go index de185c2852..92a7f2a037 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader/xtrabackup_recover.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader/xtrabackup_recover.go @@ -54,7 +54,7 @@ func (x *Xtrabackup) PreRun() error { return err } - logger.Info("clean local mysqld data dirs") + logger.Info("start to clean local mysqld data dirs") // 清理本地目录 if err := x.cleanXtraEnv(); err != nil { return err @@ -174,14 +174,13 @@ func (x *Xtrabackup) cleanXtraEnv() error { 
"datadir", "innodb_log_group_home_dir", "innodb_data_home_dir", - "tokudb_log_dir", - "tokudb_data_dir", "relay-log", "log_bin", "tmpdir", } if x.StorageType == "tokudb" { - dirs = []string{"tokudb_log_dir", "tokudb_data_dir", "tmpdir"} + dirs = []string{"tokudb_log_dir", "tokudb_data_dir", "tmpdir", "relay-log", + "innodb_log_group_home_dir", "innodb_data_home_dir"} // replace ibdata1 } return x.CleanEnv(dirs) } diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader/xtrabackup_recover_repaire.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader/xtrabackup_recover_repaire.go index 91502307fb..6f06bd17b0 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader/xtrabackup_recover_repaire.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader/xtrabackup_recover_repaire.go @@ -13,6 +13,7 @@ package dbbackup_loader import ( "database/sql" "fmt" + "os" "path" "path/filepath" "regexp" @@ -229,7 +230,7 @@ func (x *Xtrabackup) CleanEnv(dirs []string) error { return fmt.Errorf("port %d is still opened", x.TgtInstance.Port) } - var dirArray []string + var pathsToReset []string for _, v := range dirs { if strings.TrimSpace(x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, "")) == "" { logger.Warn(fmt.Sprintf("my.cnf %s is Emtpty!!", v)) @@ -244,7 +245,7 @@ func (x *Xtrabackup) CleanEnv(dirs []string) error { reg := regexp.MustCompile(cst.RelayLogFileMatch) if result := reg.FindStringSubmatch(val); len(result) == 2 { relayLogDir := result[1] - dirArray = append(dirArray, "rm -rf "+relayLogDir+"/*") + pathsToReset = append(pathsToReset, relayLogDir) } case "log_bin", "log-bin": val, err := x.myCnf.GetMySQLLogDir() @@ -255,26 +256,20 @@ func (x *Xtrabackup) CleanEnv(dirs []string) error { if result := reg.FindStringSubmatch(val); len(result) == 2 { binlogDir := result[1] // TODO 所有 rm -rf 的地方都应该要检查是否可能 rm -rf / binlog.xxx 这种误删可能 - dirArray = append(dirArray, "rm -rf "+binlogDir+"/*") + pathsToReset = append(pathsToReset, binlogDir) } case "slow_query_log_file", "slow-query-log-file": if val := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, "slow_query_log_file", ""); val != "" { - dirArray = append(dirArray, "rm -f "+val) + pathsToReset = append(pathsToReset, val) } default: val := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, "") if strings.TrimSpace(val) != "" && strings.TrimSpace(val) != "/" { - dirArray = append(dirArray, "rm -rf "+val+"/*") + pathsToReset = append(pathsToReset, val) } } } - scripts := strings.Join(dirArray, "\n") - logger.Info("CleanEnv: %s", scripts) - // run with mysql os user - if _, err := osutil.ExecShellCommand(false, scripts); err != nil { - return err - } - return nil + return ResetPath(pathsToReset) } // ReplaceMycnf godoc @@ -338,3 +333,31 @@ func (x *Xtrabackup) getSocketName() string { sock := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, "socket", "/tmp/mysql.sock") return sock } + +// ResetPath clean files +// if filepath is dir, clean all files in it (file permission and owner is NOT preserved) +// if filepath is file, remove it +// this function is used to avoid "/bin/rm: Argument list too long" when using rm -rf /xxx/path/* +func ResetPath(paths []string) error { + for _, pa := range paths { + if strings.TrimSpace(pa) == "/" { + return errors.Errorf("path %s is not allowed to clean", pa) + } + if cmutil.IsDirectory(pa) { + logger.Info("Clean Dir: %s", pa) + if err := os.RemoveAll(pa); err != nil { + 
return errors.WithMessage(err, "clean dir") + } else { // recreate dir + if err = os.MkdirAll(pa, 0755); err != nil { + return errors.WithMessage(err, "recreate dir") + } + } + } else { + logger.Info("Remove File: %s", pa) + if err := os.RemoveAll(pa); err != nil { + return errors.WithMessage(err, "remove file") + } + } + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_repaire_util.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_repaire_util.go index 7c80c6be0c..26b4965ce1 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_repaire_util.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_repaire_util.go @@ -12,6 +12,7 @@ import ( "dbm-services/common/go-pubpkg/cmutil" "dbm-services/common/go-pubpkg/logger" "dbm-services/common/go-pubpkg/mysqlcomm" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbbackup_loader" "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" "dbm-services/mysql/db-tools/dbactuator/pkg/native" "dbm-services/mysql/db-tools/dbactuator/pkg/util" @@ -167,7 +168,7 @@ func (x *XLoad) CleanEnv(dirs []string) error { return fmt.Errorf("port %d is still opened", x.TgtInstance.Port) } - var dirArray []string + var pathsToReset []string for _, v := range dirs { if strings.TrimSpace(x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, "")) == "" { logger.Warn(fmt.Sprintf("my.cnf %s is Emtpty!!", v)) @@ -182,7 +183,7 @@ func (x *XLoad) CleanEnv(dirs []string) error { reg := regexp.MustCompile(cst.RelayLogFileMatch) if result := reg.FindStringSubmatch(val); len(result) == 2 { relaylogdir := result[1] - dirArray = append(dirArray, "rm -rf "+relaylogdir+"/*") + pathsToReset = append(pathsToReset, relaylogdir) } case "log_bin", "log-bin": val, err := x.myCnf.GetMySQLLogDir() @@ -193,26 +194,21 @@ func (x *XLoad) CleanEnv(dirs []string) error { if result := reg.FindStringSubmatch(val); len(result) == 2 { binlogdir := result[1] // TODO 所有 rm -rf 的地方都应该要检查是否可能 rm -rf / binlog.xxx 这种误删可能 - dirArray = append(dirArray, "rm -rf "+binlogdir+"/*") + pathsToReset = append(pathsToReset, binlogdir) } case "slow_query_log_file", "slow-query-log-file": if val := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, "slow_query_log_file", ""); val != "" { - dirArray = append(dirArray, "rm -f "+val) + pathsToReset = append(pathsToReset, val) } default: val := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, "") if strings.TrimSpace(val) != "" && strings.TrimSpace(val) != "/" { - dirArray = append(dirArray, "rm -rf "+val+"/*") + pathsToReset = append(pathsToReset, val) } } } - scripts := strings.Join(dirArray, "\n") - logger.Info("CleanEnv: %s", scripts) - // run with mysql os user - if _, err := osutil.ExecShellCommand(false, scripts); err != nil { - return err - } - return nil + + return dbbackup_loader.ResetPath(pathsToReset) } // ReplaceMycnf godoc diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_dump.go b/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_dump.go index 20de195143..77a289581d 100644 --- a/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_dump.go +++ b/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_dump.go @@ -91,6 +91,7 @@ var dumpCmd = &cobra.Command{ }{} body.Name = "dbbackup-by-host" body.Content = fmt.Sprintf("run dbbackup failed %s", err.Error()) + body.Dimension = map[string]interface{}{} if sendErr := manager.SendEvent(body.Name, body.Content, body.Dimension); sendErr != 
nil { logger.Log.Error("SendEvent failed", sendErr.Error()) } diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_spider.go b/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_spider.go index 625c893cdd..8fd0e6f87e 100644 --- a/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_spider.go +++ b/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_spider.go @@ -93,9 +93,9 @@ func findSpiderBackupConfigFile(cnfFiles []string) (string, error) { var spiderScheduleCmd = &cobra.Command{ Use: "schedule", - SilenceUsage: true, Short: "spiderbackup schedule", Long: `Start spider global backup. Will initialize backup tasks using one backup-id on spider master`, + SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { if err := logger.InitLog("dbbackup_spider.log"); err != nil { return err @@ -125,9 +125,10 @@ var spiderScheduleCmd = &cobra.Command{ } var spiderCheckCmd = &cobra.Command{ - Use: "check", - Short: "spiderbackup check", - Long: `Check or run backup todo tasks`, + Use: "check", + Short: "spiderbackup check", + Long: `Check or run backup todo tasks`, + SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { if err := logger.InitLog("dbbackup_spider.log"); err != nil { return err @@ -136,7 +137,11 @@ var spiderCheckCmd = &cobra.Command{ if err != nil { return err } - publicConfigs, err := batchParseCnfFiles(cnfFiles) + configFile, err := findSpiderBackupConfigFile(cnfFiles) + if err != nil { + return err + } + publicConfigs, err := batchParseCnfFiles([]string{configFile}) if err != nil { return err } @@ -150,9 +155,10 @@ var spiderCheckCmd = &cobra.Command{ } var spiderQueryCmd = &cobra.Command{ - Use: "query", - Short: "spiderbackup query", - Long: `Query spider backup task status, only run on spider master`, + Use: "query", + Short: "spiderbackup query", + Long: `Query spider backup task status, only run on spider master`, + SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { if err := logger.InitLog("dbbackup_spider.log"); err != nil { return err diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup-go-deps-txsql/bin/tokudb_back.pl b/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup-go-deps-txsql/bin/tokudb_back.pl new file mode 100644 index 0000000000..a890fd6d36 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup-go-deps-txsql/bin/tokudb_back.pl @@ -0,0 +1,553 @@ +#!/usr/bin/perl + +use FindBin qw($Bin); + +use warnings; +use strict; +use Getopt::Long qw(:config posix_default); +use POSIX qw(strftime); +use File::Copy qw(copy); +use Data::Dumper; +use DBI; +use File::Find; +no warnings 'File::Find'; + +my $user=''; +my $password=''; +my $sock=''; +my $host='localhost'; +my $port=3306; +my $increment=0; +my $lock_wait_timeout = undef; +my $dump_slave = undef; +my $global_time_offset=0; +my @tokudb_backup_warnings; +sub tmysql_version_parser { + my $mysql_version = shift; + if($mysql_version =~ /tmysql/){ + return sprintf( '%03d%03d%03d', $mysql_version =~ m/tmysql-([012345678])\.(\d+)\.*(\d*)/g ); + }else{ + return '000000000'; + } +} + +Getopt::Long::Configure ("bundling_override"); +GetOptions( + 'host|h=s' => \$host, + 'password|p=s' => \$password, + 'port|P=s' => \$port, + 'user|u=s' => \$user, + 'socket|S=s' => \$sock, + 'increment|i!' => \$increment, + 'flush-wait-timeout=s' => \$lock_wait_timeout, + 'dump-slave!' 
=> \$dump_slave +)or die "usage: xxx -u user -p password -h host -P port [-i|increment] backdir/target_name(when enable increment,there shoud be previous file in backdir) global_time_offset"; + + +my $backdir= $ARGV[0]; + +if( -d $backdir){ +}else{ + mkdir("$backdir") or die "mkdir $backdir fail"; +} +if(defined $ARGV[1] and $ARGV[1]=~ /\s*(\d+)\s*/){ + $global_time_offset=$1; +} +mkdir("$backdir/mysql_data"); +mkdir("$backdir/tokudb_data"); +mkdir("$backdir/tokudb_log"); + +if ($sock =~ /.+\/(\d+)\// ) { + $port = $1; +} + +my $data_dir = undef; +my $tokudb_data_dir = undef; +my $tokudb_log_dir = undef; +my $myconf = ($port == 3306)? "/etc/my.cnf" : "/etc/my.cnf.$port"; + +open( my $my_cnf_fp, "< $myconf" ) or die "$myconf open error" ; +while ( my $line = <$my_cnf_fp> ) { + chomp $line; + if ( not defined $data_dir ) { + if ( $line =~ /\s*datadir\s*=([\S]+)/ ) { + $data_dir = $1; + next; + } + } + if ( not defined $tokudb_data_dir ) { + if ( $line =~ /\s*tokudb_data_dir\s*=([\S]+)/ ) { + $tokudb_data_dir = $1; + next; + } + } + if ( not defined $tokudb_log_dir ) { + if ( $line =~ /\s*tokudb_log_dir\s*=([\S]+)/ ) { + $tokudb_log_dir = $1; + next; + } + } +} +close($my_cnf_fp); + +unless (defined $data_dir && + defined $tokudb_data_dir && + defined $tokudb_log_dir ) +{ + die "some key in $myconf lost"; +} + +my $fully_stamp=(time)+$global_time_offset; +#system("date '+%s' > $backdir/TOKUDB.BEGIN"); +my $date=strftime("%Y%m%d_%H%M%S", localtime($fully_stamp)); + +my $backed_debug_log_file="$backdir/debug_log.txt"; +my $debug_log_file="$Bin/debug_log_tokudb.txt"; +open my $backed_debug_log,">$backed_debug_log_file" or die "failed to open $backed_debug_log_file for:$!\n"; +open my $debug_log,">>$debug_log_file" or die "failed to open $debug_log_file for:$!\n"; +print $debug_log "\nport:$port time:$date\n"; +print $backed_debug_log "\nport:$port time:$date\n"; + +system("echo $date >> $backdir/TOKUDB.BEGIN"); +my $tname=( split(/\//,$ARGV[0]) )[-1]; +my $dir; +if($ARGV[0] =~ /(.+)\/${tname}$/){ + $dir=$1; +}else{ + die "failed while parsing backdir"; +} +my $fully_name=$tname; +my $dbh = DBI->connect ("DBI:mysql:mysql:host=$host:port=$port:mysql_socket=$sock", $user, $password); + +my $sql = qq{ select version() }; +my $row_ref = $dbh->selectrow_arrayref($sql); +my $tmysql_ver = tmysql_version_parser($row_ref->[0]); + +if($tmysql_ver lt tmysql_version_parser("tmysql-2.1.3")){ + die "tokudb physical backup only avaliable above tmysql-2.1.3"; +} + +if(defined $lock_wait_timeout and $lock_wait_timeout ne '0'){ + $dbh->do("SET LOCK_WAIT_TIMEOUT=$lock_wait_timeout;") or die "Set lock_wait_timeout failed!"; +} + +##### STEP 1. set tokudb_commit_sync=1:close redo log buffer +#my $ret=$dbh->do("start transaction"); +#die "unable to start transaction for close redo log buffer" if not defined $ret or $ret<0; +#$sql = qq{ select \@\@tokudb_commit_sync }; +#$row_ref = $dbh->selectrow_arrayref($sql); +#my $old_global_tokudb_commit_sync=$row_ref->[0]; +#$ret=$dbh->do("set global tokudb_commit_sync=1"); +#die "unable to set global tokudb_commit_sync=1" if not defined $ret or $ret<0; +#$ret=$dbh->do("commit"); +#die "unable commit set global tokudb_commit_sync=1" if not defined $ret or $ret<0; +#print qx(date); +#print "unable redo log buffer ok\n\n"; + + +##### STEP 2. 
get METADATA lock for each table +if($tmysql_ver ge tmysql_version_parser("tmysql-2.1.3")){ + $dbh->do("lock tables for backup;") or die "Get metadata lock failed"; + print $debug_log qx(date); + print $debug_log "get metadata lock ok\n\n"; +} + +my @white_databases=qw(information_schema db_infobase mysql performance_schema test sys); +my @backup_databases; +my @other_engines; +eval{ + local $SIG{__DIE__} = ""; + $sql="show databases"; + my $select=$dbh->prepare($sql); + $select->execute(); + while(my $result=($select->fetchrow_array)[0]){ + if(not grep(/$result/,@white_databases)){ + push(@backup_databases,$result); + } + } + if(@backup_databases){ + my $database_string=join("','",@backup_databases); + $sql="select distinct engine from information_schema.tables where table_schema in('$database_string')"; + $select=$dbh->prepare($sql); + $select->execute(); + while(my @row_array=$select->fetchrow_array){ + my ($engine)=@row_array; + if(not $engine eq 'TokuDB'){ + push(@other_engines,$engine); + } + } + } +}; +if($@){ + die "check engine for all backup table failed:$@\n"; +} + + +my $metadata_before_backup=qx(ls -l $data_dir $tokudb_log_dir/*); +##### STEP 3. get checkpoint lock +#set tokudb_checkpoint_lock=on:let dml only change redo log +$row_ref=$dbh->selectrow_arrayref("select * from information_schema.global_variables where variable_name='tokudb_checkpoint_lock'"); +print $debug_log "global:$row_ref->[0]:$row_ref->[1]\n"; +$row_ref=$dbh->selectrow_arrayref("select * from information_schema.session_variables where variable_name='tokudb_checkpoint_lock'"); +print $debug_log "session:$row_ref->[0]:$row_ref->[1]\n"; +$dbh->do("SET TOKUDB_CHECKPOINT_LOCK=ON;") or die "Get tokudb_checkpoint lock failed"; +print $debug_log qx(date); +$row_ref=$dbh->selectrow_arrayref("select * from information_schema.global_variables where variable_name='tokudb_checkpoint_lock'"); +print $debug_log "global:$row_ref->[0]:$row_ref->[1]\n"; +$row_ref=$dbh->selectrow_arrayref("select * from information_schema.session_variables where variable_name='tokudb_checkpoint_lock'"); +print $debug_log "session:$row_ref->[0]:$row_ref->[1]\n"; +print $debug_log "set tokudb_checkpoint_lock=on ok\n\n"; + + +##### STEP 4. 
Copy tokudb.* and redo log, and get binlog position in a blocking-binlog or stopping-slave; +if(defined $dump_slave){ + $dbh->do("set \@old_rpl_stop_slave_timeout=\@\@rpl_stop_slave_timeout;") or die("failed to get the value of rpl_stop_slave_timeout"); + print($debug_log "rpl_stop_slave_timeout:".(($dbh->selectrow_arrayref("select \@\@rpl_stop_slave_timeout"))->[0])."\n"); + $dbh->do("set global rpl_stop_slave_timeout=10;") or die("failed to set the value of rpl_stop_slave_timeout"); + print($debug_log qx(date)); + my $ret=$dbh->do("stop slave;"); + print($debug_log qx(date)."\n"); + $dbh->do("set global rpl_stop_slave_timeout=\@old_rpl_stop_slave_timeout;") or die("failed to recover the value of rpl_stop_slave_timeout"); + if(not defined $ret){#Assuming that there are dead locks,and stop slave time out + my $dbh = DBI->connect ("DBI:mysql:mysql:host=$host:port=$port:mysql_socket=$sock", $user, $password); + $ret=qx(mysql -h $host -P $port -u$user -p$password -e "show full processlist") or die "failed to execute 'show full processlist':$!"; + print($debug_log $ret); + $dbh->do("SET TOKUDB_CHECKPOINT_LOCK=OFF;") or die "release tokudb_checkpoint lock failed"; + system("rm -rf $backdir/mysql_data")==0 or die "failed:$!"; + close $debug_log; + close $backed_debug_log; + exit(223); + } + print $debug_log qx(date); + print $debug_log "stop slave ok\n\n"; +}else{ + $dbh->do("lock binlog for backup;") or die "lock binlog for backup failed"; + print $debug_log qx(date); + print $debug_log "lock binlog for backup ok\n\n"; +} + +print $backed_debug_log "Binlog time: " . qx(date)."\n"; +print $debug_log "Binlog time: " . qx(date)."\n"; +my $binlog_stamp=time; + +print $debug_log qx(date); +print $debug_log "copy tokudb redolog in: $tokudb_log_dir ..."; +system("ls $tokudb_log_dir| xargs -I '{}' cp -r $tokudb_log_dir/{} $backdir/tokudb_log")==0 or die "failed:$!\n"; +print $debug_log "\tdone.\n"; +print $debug_log qx(date)."\n"; + +print $debug_log qx(date); +print $debug_log "copy tokudb rollback log ..."; +system("ls $data_dir/tokudb.*| xargs -I '{}' cp -r {} $backdir/")==0 or die "failed:$!"; +print $debug_log "\tdone.\n"; +print $debug_log qx(date)."\n"; + +#get tokudb_data list +my @tokudb_data_files; +open FILELIST,">$backdir/tokudb_data/filelist.txt" or die "can't open $backdir/tokudb_data/filelist.txt:$!\n"; +my $select_file=$dbh->prepare("select distinct internal_file_name from information_schema.TokuDB_file_map"); +$select_file->execute(); +while(my @tokudb_files=$select_file->fetchrow_array){ + push(@tokudb_data_files,$tokudb_files[0]); + printf(FILELIST "$tokudb_files[0]\n"); +} +close FILELIST; + +$sql = qq{show master status}; +$row_ref = $dbh->selectrow_hashref($sql); +if(defined $row_ref and defined $row_ref->{'File'} and defined $row_ref->{'Position'} ){ + open(my $master_info, ">", "$backdir/xtrabackup_binlog_info") or die "could not open binlog_info file\n"; + print $master_info "$row_ref->{'File'} $row_ref->{'Position'}\n"; + close $master_info; +}else{ + die "Get master info failed\n"; +} + +if(defined $dump_slave){ + $sql = qq{show slave status}; + $row_ref = $dbh->selectrow_hashref($sql); + if (defined $row_ref and defined $row_ref->{'Relay_Master_Log_File'} and defined $row_ref->{'Exec_Master_Log_Pos'}) + { + open(my $slave_info, ">", "$backdir/xtrabackup_slave_info") or die "could not open slave_info file"; + print $slave_info "CHANGE MASTER TO MASTER_LOG_FILE='$row_ref->{Relay_Master_Log_File}', MASTER_LOG_POS=$row_ref->{'Exec_Master_Log_Pos'} \n"; + close $slave_info; + 
+    }else{
+        die "Get slave info failed\n";
+    }
+}
+
+print $debug_log qx(date);
+print $debug_log "copy mysql configure file: $myconf ...";
+system("cp $myconf $backdir/backup-my.cnf") ==0 or die "failed:$!\n";
+print $debug_log "\tdone.\n";
+print $debug_log qx(date)."\n";
+
+if(defined $dump_slave){
+    $dbh->do("start slave") or die "start slave failed\n";
+    print $debug_log qx(date);
+    print $debug_log "start slave ok,time:".(qx(date))."\n";
+}else{
+    $dbh->do("unlock binlog") or die "unlock binlog failed\n";
+    print $debug_log qx(date);
+    print $debug_log "unlock binlog ok,time:".(qx(date))."\n";
+}
+
+
+##### STEP 5. Copy frm
+print $debug_log qx(date);
+print $debug_log "copy mysql data dir: $data_dir/ ...";
+system("ls $data_dir| xargs -I '{}' cp -r $data_dir/{} $backdir/mysql_data")==0 or die "failed:$!";
+print $debug_log "\tdone.\n";
+print $debug_log qx(date)."\n";
+
+##### STEP 6. recovery tokudb_commit_sync;
+#$ret=$dbh->do("set global tokudb_commit_sync=$old_global_tokudb_commit_sync");
+#die "unable to restore tokudb_commit_sync" if not defined $ret or $ret<0;
+##sleep(100);
+#print qx(date);
+#print "enable tokudb redo log buffer ok\n\n";
+#
+#goto SKIP;
+##### STEP 7. copy data or increment data
+if(is_low_space("$Bin/history_backup_size","$Bin/last_backup_size",$backdir,0.96,$debug_log)){
+    push(@tokudb_backup_warnings,"SMS#low space,previous file for port:${port} will be deleted,and a fully backup will be done today");
+    #delete old file
+    print $debug_log "\tlow space,delete old file and then fully backup";
+    qx(ls $backdir|grep _${port}_|grep _${host}_|xargs rm -rf);
+    #fully backup
+    foreach my $file(@tokudb_data_files){
+        system("cp -r $tokudb_data_dir/$file $backdir/tokudb_data") ==0 or die "copy $tokudb_data_dir/$file to $backdir/tokudb_data failed:$!";
+    }
+}elsif(not $increment){
+    print $debug_log qx(date);
+    print $debug_log "fully copy tokudb data dir: $tokudb_data_dir/ ...";
+    foreach my $file(@tokudb_data_files){
+        system("cp -r $tokudb_data_dir/$file $backdir/tokudb_data") ==0 or die "copy $tokudb_data_dir/$file to $backdir/tokudb_data failed:$!";
+    }
+    #system("ls $tokudb_data_dir|xargs -I '{}' cp -r $tokudb_data_dir/{} $backdir/tokudb_data") ==0 or die "failed";
+}else{
+    print $debug_log qx(date);
+    print $debug_log "incrementally backup($tokudb_data_dir/)";
+    my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst)=localtime(time()+$global_time_offset);
+    if(($port%7)==$wday){ #default fully backup day,20000%7=1
+        print $debug_log "\tfully day";
+        foreach my $file(@tokudb_data_files){
+            system("cp -r $tokudb_data_dir/$file $backdir/tokudb_data") ==0 or die "copy $tokudb_data_dir/$file to $backdir/tokudb_data failed:$!";
+        }
+        #system("ls $tokudb_data_dir|xargs -I '{}' cp -r $tokudb_data_dir/{} $backdir/tokudb_data") ==0 or die "failed";
+    }else{
+#1 get the fully backup info file
+        print $debug_log "\tincremental day";
+        opendir (DIR,$dir) or die "can't open the directory $dir";
+        my @dirs=readdir DIR;
+        close DIR;
+
+        #my $fully_backup_port=$wday;
+        #while($fully_backup_port<$port){$fully_backup_port+=7;}
+        #my $days_since_fully=$fully_backup_port-$port;
+
+# time = strftime("%Y%m%d_%H%M%S", localtime(time));
+# target_name = "${app_name}_${hostname}_${host_address}_${port}_${time}";
+# fully day info file name should be: \S+_${host}_${port}_${fullyday}_\d{6}\.info
+        #print "\tport:$port fully_backup_port:$fully_backup_port days_since_fully:$days_since_fully";
+        my $tmpstamp;
+        my $tmpname;
+        my $i;
+        my $fully_stamp_and_name={};
+        for($i=0;$i<7;$i++){
+            my $fullyday=strftime("%Y%m%d", localtime($fully_stamp-$i*24*3600));
+            #print "\$port:$port \$fully_backup_port:$fully_backup_port \$days_since_fully:$days_since_fully \$fullyday:$fullyday\n";
+            #print "now begin to exam ".(scalar @dirs)." file\n";
+            foreach my $file(@dirs){
+                chomp($file);
+                if($file =~ /(\S+_${port}_${fullyday}_\d{6}_TOKUDB_INCREASE)\.info$/){
+                    my $current_name=$1;
+                    open INFO,"<$dir/$file" or print $debug_log "unable to open $dir/$file",last;
+                    #print "\nexamining file: $file\n";
+                    while(<INFO>){
+                        chomp;
+                        my $tmpstr=$_;
+                        if($tmpstr =~ /\s*FULLY_STAMP\s*=\s*(\d+)/){
+                            $tmpstamp=$1;
+                        }elsif($tmpstr=~/\s*FULLY_NAME\s*=\s*(\S+)/){
+                            $tmpname=$1;
+                        }
+                    }
+                    close INFO;
+                    if(defined $tmpstamp and defined $tmpname and $tmpname eq $current_name){#find fully backup info file
+                        $fully_stamp_and_name->{$tmpstamp}=$tmpname;
+                    }
+                    $tmpstamp=undef;
+                    $tmpname=undef;#for multi fully backup one day
+                }
+            }
+            if(scalar keys %$fully_stamp_and_name){
+                last;
+            }
+        }
+        my @fully_stamps=sort keys %$fully_stamp_and_name;
+        if(@fully_stamps){ #previous backup is ok
+            print $debug_log "\tincremental backup";
+            $fully_stamp=$fully_stamps[-1];
+            $fully_name=$fully_stamp_and_name->{$fully_stamp};
+            print $debug_log "\nfully_stamp:$fully_stamp\nfully_name:$fully_name\n";
+            foreach my $file(@tokudb_data_files){
+                my($device, $inode, $mode, $nlink, $uid, $gid, $rdev, $size,$atime, $mtime, $ctime, $blksize, $blocks) = stat("$tokudb_data_dir/$file");
+                #printf("$file\tmtime:$mtime($fully_stamp)\tctime:$ctime($fully_stamp)\n");
+                if($mtime>$fully_stamp ||($ctime>$fully_stamp)){
+                    system("cp $tokudb_data_dir/$file $backdir/tokudb_data") ==0 or die "failed:$!";
+                    #printf("copied $file\n");
+                }
+            }
+        }else{ #previous backup failed
+            #print "\tfully backup(i:$i fully_stamp:$fully_stamp)";
+            print $backed_debug_log "\ttmpstamp:$tmpstamp" if defined $tmpstamp;
+            print $debug_log "\ttmpstamp:$tmpstamp" if defined $tmpstamp;
+            foreach my $file(@tokudb_data_files){
+                system("cp -r $tokudb_data_dir/$file $backdir/tokudb_data") ==0 or die "copy $tokudb_data_dir/$file to $backdir/tokudb_data failed:$!";
+            }
+            #system("ls $tokudb_data_dir|xargs -I '{}' cp -r $tokudb_data_dir/{} $backdir/tokudb_data") ==0 or die "failed";
+        }
+    }
+}
+SKIP:
+print $debug_log "\tdone.\n";
+print $debug_log qx(date)."\n";
+
+##### STEP 8. check file change time
+my $metadata_after_backup=qx(ls -l $data_dir $tokudb_log_dir/*);
+print $backed_debug_log "############################################# metadata before backup ######################################################\n";
+print $backed_debug_log $metadata_before_backup;
+print $backed_debug_log "############################################# metadata after backup ######################################################\n";
+print $backed_debug_log $metadata_after_backup;
+print $debug_log "############################################# metadata before backup ######################################################\n";
+print $debug_log $metadata_before_backup;
+print $debug_log "############################################# metadata after backup ######################################################\n";
+print $debug_log $metadata_after_backup;
+#begin check that all file's ctime should not be newer than $binlog_stamp
+my @changed_file;
+opendir (DIR,$tokudb_data_dir) or die "can't open the directory $tokudb_data_dir";
+my @dirs=readdir DIR;
+close DIR;
+print $backed_debug_log "#################################################### changed file ###########################################################\n";
+print $debug_log "#################################################### changed file ###########################################################\n";
+foreach my $file(@dirs){
+    if($file eq "\." or $file eq "\.\."){next;}
+    my($device, $inode, $mode, $nlink, $uid, $gid, $rdev, $size,$atime, $mtime, $ctime, $blksize, $blocks) = stat("$tokudb_data_dir/$file");
+    if($mtime>$binlog_stamp){
+        my $full_file_path="$tokudb_data_dir/$file";
+        my $ls_l_of_file=qx(ls -l --time-style='+%Y-%m-%d %H:%M:%S' $full_file_path);
+        my $adate=strftime("%Y-%m-%d %H:%M:%S", localtime($atime));
+        my $mdate=strftime("%Y-%m-%d %H:%M:%S", localtime($mtime));
+        my $cdate=strftime("%Y-%m-%d %H:%M:%S", localtime($ctime));
+        my $bdate=strftime("%Y-%m-%d %H:%M:%S", localtime($binlog_stamp));
+        chomp($ls_l_of_file);
+        print $backed_debug_log "$ls_l_of_file\tchanged:a($adate) m($mdate) c($cdate) b($bdate)\n";
+        print $debug_log "$ls_l_of_file\tchanged:a($adate) m($mdate) c($cdate) b($bdate)\n";
+        #print "$tokudb_data_dir/$file changed\n";
+        push(@changed_file,"$tokudb_data_dir/$file");
+    }
+}
+
+##### STEP 9. release metadata lock
+if($tmysql_ver ge tmysql_version_parser("tmysql-2.1.3")){
+    $dbh->do("unlock tables") or die "failed to release metadata lock";
+    print $debug_log qx(date);
+    print $debug_log "release all metadata lock ok\n\n";
+}
+
+
+##### STEP 10. release tokudb_checkpoint_lock
+$dbh->do("SET TOKUDB_CHECKPOINT_LOCK=OFF") or die "release tokudb checkpoint lock failed";
+$row_ref=$dbh->selectrow_arrayref("select * from information_schema.global_variables where variable_name='tokudb_checkpoint_lock'");
+print $debug_log "global:$row_ref->[0]:$row_ref->[1]\n";
+$row_ref=$dbh->selectrow_arrayref("select * from information_schema.session_variables where variable_name='tokudb_checkpoint_lock'");
+print $debug_log "session:$row_ref->[0]:$row_ref->[1]\n";
+print $debug_log qx(date);
+print $debug_log "set tokudb_checkpoint_lock=off ok\n\n";
+$dbh->disconnect;
+
+
+##### STEP 11. fully backup info to file
+#save fully backup info to file
+open my $latest_fully_backup_info,">$Bin/latest_fully_backup_info.$port"
+or die("ERROR: Can not open latest fully backup info file $Bin/latest_fully_backup_info.$port for $@");
+printf $latest_fully_backup_info "FULLY_STAMP=$fully_stamp\n";
+printf $latest_fully_backup_info "FULLY_NAME=$fully_name\n";
+close($latest_fully_backup_info);
+if(scalar @changed_file){
+    #print "changed file:".(join(',',@changed_file))."\n";
+    my $file_string=join(",",@changed_file);
+    print $backed_debug_log "changed file:$file_string\n";
+    print $debug_log "changed file:$file_string\n";
+    push(@tokudb_backup_warnings,"INFO#warning:some file changed during backup,see more information in $debug_log_file");
+}
+if(scalar @other_engines){
+    my $other_engine_string=join(",",@other_engines);
+    my $backup_databases_string=join(",",@backup_databases);
+    push(@tokudb_backup_warnings,"SMS#engine other than tokudb exists, other engine:$other_engine_string database:$backup_databases_string");
+    print "warning:engine other than tokudb($other_engine_string) exists in $backup_databases_string\n";
+}
+print_log("$Bin/tokudb_backup.log.$port",@tokudb_backup_warnings);
+close $backed_debug_log;
+close $debug_log;
+#system("date '+%s'> $backdir/TOKUDB.END");
+$date=strftime("%Y%m%d_%H%M%S", localtime(time()+$global_time_offset));
+system("echo $date >> $backdir/TOKUDB.END");
+
+sub print_log{
+#logarray entry format:level#message
+    my($logfile,@logarray)=@_;
+    if(scalar @logarray){
+        open LOG,">$logfile" or die "unable to open $logfile";
+        foreach my $log(@logarray){
+            print LOG $log;
+        }
+        close LOG;
+    }else{
+        if(-e $logfile){
+            system("rm $logfile")==0 or print "WARNING:failed to rm previous logfile:$logfile:$!\n";
+        }
+    }
+}
+sub get_history_max_backup_size{
+    my($history_file)=@_;
+    if(-e $history_file and -r $history_file){
+        my $max=qx(cat $history_file|grep _${port}_|tail -7|awk '{print \$2}'|sort -n|tail -1);
+        chomp($max) if defined $max;
+        if(defined $max and $max=~/\d+/){
+            return $max;
+        }else{
+            return 0;
+        }
+    }else{
+        return 0;
+    }
+}
+sub is_low_space{
+    my($history_file,$last_file,$backup_dir,$max_percent,$debug_log)=@_;
+    my $max_size=get_history_max_backup_size($history_file);
+    my $last_size=get_history_max_backup_size($last_file);
+    my $max=($max_size>$last_size)?$max_size:$last_size;
+    if($max==0){
+        return 0;
+    }else{
+        my $backup_disk = (split(/\//, $backup_dir))[1];
+        my $disk_total = qx#df -P /$backup_disk | sed '1d' | awk '{print \$2}'#;
+        my $disk_used = qx#df -P /$backup_disk | sed '1d' | awk '{print \$3}'#;
+        chomp $disk_total; #in kB
+        chomp $disk_used; #in kB
+        $disk_total *= 1024; #to Byte
+        $disk_used *= 1024; #to Byte
+        my $space_may_used=$disk_used+$max;
+        my $warn_level=$disk_total*$max_percent;
+        print $debug_log "total-space:${disk_total}B used-space:${disk_used}B backup-may-use:${max}B warn-level:${warn_level}B \n";
+        if($space_may_used>$warn_level){
+            print $debug_log "use-space + space-backup-may-use=${space_may_used}B > warn_level=${warn_level}B\n";
+            return 1;
+        }else{
+            print $debug_log "use-space + space-backup-may-use=${space_may_used}B <= warn_level=${warn_level}B\n";
+            return 0;
+        }
+    }
+}
\ No newline at end of file
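For review convenience: a hypothetical invocation of the new backup script, pieced together from its GetOptions block and positional-argument handling above. The user, socket, and target paths are illustrative placeholders, not values shipped in this PR:

```
# Full backup: ARGV[0] is the target dir, ARGV[1] an optional clock offset in
# seconds; the port is re-derived from the socket path when one is embedded.
perl tokudb_back.pl -u backup_user -p 'xxx' -h localhost -P 20000 \
    -S /data1/mysqldata/20000/mysql.sock \
    /data/dbbak/testapp_hostname_1.1.1.1_20000_20241205_163338 0

# Incremental backup (-i): the script scans the parent dir for a previous
# *_TOKUDB_INCREASE.info file to pick the base full backup. --dump-slave
# briefly stops replication instead of taking "lock binlog for backup".
perl tokudb_back.pl -u backup_user -p 'xxx' -P 20000 -i --dump-slave \
    -S /data1/mysqldata/20000/mysql.sock \
    /data/dbbak/testapp_hostname_1.1.1.1_20000_20241206_163338 0
```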
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup-go-deps-txsql/bin/tokudb_recovery.pl b/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup-go-deps-txsql/bin/tokudb_recovery.pl
new file mode 100644
index 0000000000..71af7443ba
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup-go-deps-txsql/bin/tokudb_recovery.pl
@@ -0,0 +1,100 @@
+#!/usr/bin/perl
+
+use FindBin qw($Bin);
+
+use warnings;
+use strict;
+use Getopt::Long qw(:config posix_default);
+use POSIX qw(strftime);
+use File::Basename;
+use Data::Dumper;
+use DBI;
+
+
+my $backdir= undef;
+my $myconf = undef;
+my $increment=0;
+
+my @normal_dbs = ("mysql", "test", "performance_schema", "db_infobase", "sys");
+
+Getopt::Long::Configure ("bundling_override");
+GetOptions(
+    'backup-path=s'   => \$backdir,
+    'defaults-file=s' => \$myconf,
+    'increment|i!'    => \$increment
+) or die "usage: $0 --backup-path={path} --defaults-file={path} [-i|--increment]";
+
+
+unless (defined $backdir && defined $myconf){
+    die "backup-path or defaults-file undefined";
+}
+
+die "$backdir is not a dir" unless( -d $backdir);
+my $fully_dir=undef;
+if($increment){
+    die "when increment is enabled, the fully-backup dir is required as the only positional argument!" unless(@ARGV==1);
+    die "$ARGV[0] is not a dir" unless(-d $ARGV[0]);
+    $fully_dir=$ARGV[0];
+}
+my $data_dir = undef;
+my $tokudb_data_dir = undef;
+my $tokudb_log_dir = undef;
+
+# read datadir, tokudb_data_dir and tokudb_log_dir from the my.cnf
+open( my $my_cnf_fp, "< $myconf" ) or die "$myconf open error";
+while ( my $line = <$my_cnf_fp> ) {
+    chomp $line;
+    if ( not defined $data_dir ) {
+        if ( $line =~ /\s*datadir\s*=\s*([\S]+)/ ) {
+            $data_dir = $1;
+            next;
+        }
+    }
+    if ( not defined $tokudb_data_dir ) {
+        if ( $line =~ /\s*tokudb_data_dir\s*=\s*([\S]+)/ ) {
+            $tokudb_data_dir = $1;
+            next;
+        }
+    }
+    if ( not defined $tokudb_log_dir ) {
+        if ( $line =~ /\s*tokudb_log_dir\s*=\s*([\S]+)/ ) {
+            $tokudb_log_dir = $1;
+            next;
+        }
+    }
+}
+close($my_cnf_fp);
+
+unless (defined $data_dir &&
+        defined $tokudb_data_dir &&
+        defined $tokudb_log_dir )
+{
+    die "some key in $myconf is missing";
+}
+
+system("ls $backdir/tokudb_log|xargs -I '{}' cp -r $backdir/tokudb_log/{} $tokudb_log_dir") == 0 or die "failed to copy tokudb_log";
+if($increment){
+    # an incremental backup lists its data files in filelist.txt; a file missing
+    # from the incremental dir must be taken from the fully backup dir
+    open FILELIST,"<$backdir/tokudb_data/filelist.txt" or die "open filelist.txt failed";
+    while(<FILELIST>){
+        chomp;
+        my $file=$_;
+        if(-e "$backdir/tokudb_data/$file"){
+            $file="$backdir/tokudb_data/$file";
+        }else{
+            die "can't find $file in both $backdir/tokudb_data and $fully_dir/tokudb_data" unless(-e ($file="$fully_dir/tokudb_data/$file"));
+        }
+        system("cp $file $tokudb_data_dir") == 0 or die "failed to copy $file";
+    }
+    close FILELIST;
+}else{
+    system("ls $backdir/tokudb_data |xargs -I '{}' cp -r $backdir/tokudb_data/{} $tokudb_data_dir") == 0 or die "failed to copy tokudb_data";
+}
+system("ls $backdir/tokudb.*|xargs -I '{}' cp {} $data_dir") == 0 or die "failed to copy tokudb.* metadata";
+
+# copy user databases, skipping system databases
+foreach my $file (glob("$backdir/mysql_data/*/")){
+    my $tmp_file=basename($file);
+    if( not grep { $_ eq $tmp_file } @normal_dbs ){
+        print "copy database: $tmp_file\n";
+        system("cp -r $file $data_dir") == 0 or die "failed to copy $file";
+    }
+}
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup-go-deps-txsql/lib/.gitkeep b/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup-go-deps-txsql/lib/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup_main.sh b/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup_main.sh
index dbfe710d54..237dad7b0b 100755
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup_main.sh
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/dbbackup_main.sh
@@ -92,8 +92,8 @@ errPorts=""
 okPorts=""
 for conf_file in $configFiles
 do
-  #port=`echo $conf_file |awk -F. '{print $(NF-1)}'`
-  port=`grep MysqlPort $conf_file |grep -Ev "#|MysqlPort = 0" |head -1 | cut -d= -f2`
+  # pick the MysqlPort line that belongs to the [Public] section, skipping comments
+  port=`grep -Ei 'MysqlPort|\[Public\]' $conf_file |grep -v '^#' | grep -A1 'Public' |grep 'MysqlPort'`
+  # keep the first match, strip "MysqlPort =", and let echo trim the whitespace
+  port=$(echo `echo $port |head -1 | cut -d= -f2`)
   echo "now doing dbbackup for config file=$conf_file port=$port"
   echo "${scriptDir}/dbbackup dumpbackup --config=$conf_file $dbbackupOpt 2>&1 >> $logfile"
   ${scriptDir}/dbbackup dumpbackup --config=$conf_file $dbbackupOpt 2>&1 >> $logfile
@@ -104,7 +104,7 @@ do
   fi
 done
 # the output below can be used to tell which ports succeeded and which failed
-echo "okPorts:$okPorts,errPorts:$errPorts"
+echo "okPorts:$okPorts, errPorts:$errPorts"
 if [ -n "$errPorts" ];then
   echo "ports backup failed: $errPorts" >&2
   exit 1
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/docs/faq.md b/dbm-services/mysql/db-tools/mysql-dbbackup/docs/faq.md
index 2bcb03b016..197c01ed08 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/docs/faq.md
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/docs/faq.md
@@ -74,9 +74,40 @@ IOLimitMBPerSec = 300
 ### 6. For tendbcluster cluster backup, see [spider](spiderbackup.md)
 
-### Handling common backup failures
+### 7. Notes on character sets for logical backup
-1. log copying being too slow
-it looks like InnoDB log has wrapped around before xtrabackup could process all records due to either log copying being too slow, or log files being too small.
- The instance writes faster than the backup copies, probably because heavy DML ran during the backup window; change the backup start time, or speed up the backup:
- - increase PhysicalBackup.Throttle and PhysicalBackup.Threads
\ No newline at end of file
+
+### 8. Handling common backup failures
+
+#### 1. log copying being too slow
+> it looks like InnoDB log has wrapped around before xtrabackup could process all records due to either log copying being too slow, or log files being too small.
+
+The instance writes faster than the backup copies, probably because heavy DML ran during the backup window. Change the backup start time, or speed up the backup:
+
+increase PhysicalBackup.Throttle and PhysicalBackup.Threads
+
+#### 2. mydumper does not support CentOS 6.x
+> mydumper: error while loading shared libraries: libpcre.so
+>
+> /lib64/libc.so.6: version `GLIBC_2.14' not found (required by xxx)
+
+mydumper / myloader require glibc >= 2.14, while CentOS 6.x (or tlinux 1.2) ships glibc 2.12, which can produce the errors above. Check the glibc version with `ldd --version |grep libc`.
+
+If logical backup is required, set:
+```
+[LogicalBackup]
+UseMysqldump = auto
+Databases = *
+Tables = *
+
+[Public]
+BackupType = logical
+```
+Backing up slave data with mysqldump briefly stops the replication SQL thread to obtain a consistent binlog position, which may trigger alerts. When the backup finishes (success or failure) the SQL thread is resumed automatically (unless the process was killed).
+
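To confirm this situation on a host before changing the config, the shared-library requirements can be checked directly (the mydumper path below is illustrative; use wherever dbbackup unpacked its binaries):

```
ldd --version | head -1                                    # glibc must be >= 2.14 for mydumper / myloader
ldd /path/to/dbbackup-go/bin/mydumper | grep 'not found'   # any output means the loader errors quoted above will occur
```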
+#### 3. xtrabackup 8.0 does not support CentOS 6.x
+> xtrabackup: error while loading shared libraries: libsystemd.so.0: cannot open shared object file: No such file or directory
+>
+> /usr/lib64/libstdc++.so.6: version `GLIBCXX_3.4.15' not found (required by xxx)
+
+xtrabackup, the physical backup tool for MySQL 8.0, likewise depends on glibc >= 2.14, so errors like the above may appear.
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/docs/readme.md b/dbm-services/mysql/db-tools/mysql-dbbackup/docs/readme.md
index 0730280752..e56ede465f 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/docs/readme.md
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/docs/readme.md
@@ -324,8 +324,13 @@ DefaultsFile = /etc/my.cnf.3306
 ## Parameter reference
 ### Public
 - Public.KillLongQueryTime
-  Before starting the backup, check for long-running queries and immediately kill any that exceed this time. The backup account needs the SUPER privilege.
-  Defaults to 0, meaning do not kill. Effective for mydumper / xtrabackup.
+  This option behaves differently for logical and physical backup:
+  - logical backup, mydumper
+    equivalent to `--kill-long-queries --long-query-guard xx`: before FTWRL is issued, kill any query that has been running longer than this
+  - physical backup, xtrabackup
+    equivalent to `--kill-long-queries-timeout=xx`: after FTWRL is issued and gets blocked, wait this long before killing the queries that block it
+
+The backup account needs the SUPER privilege. Defaults to 0, meaning do not kill.
 - Public.FtwrlWaitTimeout
   Before starting the backup, check for long-running queries; if they are not auto-killed (or killing them fails), wait this long for them to finish, then give up FTWRL and give up the backup.
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config/public.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config/public.go
index 47732a561c..93a26e8ed0 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config/public.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config/public.go
@@ -75,12 +75,13 @@ type Public struct {
     // EncryptOpt backup files encrypt options
     EncryptOpt *cmutil.EncryptOpt `ini:"EncryptOpt"`
 
-    // KillLongQueryTime how long to let long-running queries run before killing them. if 0 not kill. Default 0
-    // mydumper --kill-long-queries --long-query-guard xx
-    // xtrabackup --kill-long-queries-timeout
+    // KillLongQueryTime kills long-running queries. if 0 not kill. Default 0
+    // mydumper --kill-long-queries --long-query-guard xx: before FTWRL is issued, kill any query running longer than this
+    // xtrabackup --kill-long-queries-timeout: after FTWRL is issued and blocked, wait this long before killing the blocking queries
     KillLongQueryTime int `ini:"KillLongQueryTime"`
-    // FtwrlWaitTimeout how long to wait for long-running queries, then give up ftwrl and give up the backup (FTWRL not yet issued). Default 120s
-    // xtrabackup --ftwrl-wait-timeout
+    // FtwrlWaitTimeout before FTWRL is issued: if long queries exist, wait this long for them, then give up ftwrl and abort the backup. Default 120s
+    // a query counts as long-running when its running time > --ftwrl-wait-threshold (--lock-wait-threshold for 5.6)
+    // xtrabackup --ftwrl-wait-timeout (--lock-wait-timeout for 5.6)
     // mydumper --long-query-guard
     FtwrlWaitTimeout int `ini:"FtwrlWaitTimeout"`
     // AcquireLockWaitTimeout If LOCK TABLES FOR BACKUP does not return within given timeout, abort the backup.
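Putting these comments together with the version gating implemented in the next hunk, the timeout options land on the tools roughly as follows (timeout values are examples, not defaults):

```
# KillLongQueryTime=30, FtwrlWaitTimeout=120, AcquireLockWaitTimeout=60
# xtrabackup, MySQL 5.6:
xtrabackup ... --kill-long-queries-timeout=30 --lock-wait-timeout=120
# xtrabackup, MySQL 5.7:
xtrabackup ... --kill-long-queries-timeout=30 --ftwrl-wait-timeout=120 --lock-ddl --lock-ddl-timeout=60
# xtrabackup, MySQL 8.0:
xtrabackup ... --kill-long-queries-timeout=30 --ftwrl-wait-timeout=120 --lock-ddl --lock-ddl-timeout=60 --backup-lock-timeout=60
# mydumper (logical backup):
mydumper ... --kill-long-queries --long-query-guard 30
```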
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_physical.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_physical.go
index 9c901c66c9..d8a5a1dd09 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_physical.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_physical.go
@@ -104,19 +104,27 @@ func (p *PhysicalDumper) buildArgs() []string {
     }
     if strings.Compare(p.mysqlVersion, "005007000") > 0 {
         args = append(args, "--lock-ddl")
-        if p.cnf.Public.AcquireLockWaitTimeout > 0 {
-            args = append(args, fmt.Sprintf("--lock-ddl-timeout=%d", p.cnf.Public.AcquireLockWaitTimeout))
+        if strings.Compare(p.mysqlVersion, "008000000") < 0 { // ver >= 5.7 and ver < 8.0
+            args = append(args, "--binlog-info=ON")
         }
-        if p.cnf.Public.FtwrlWaitTimeout > 0 {
+    }
+
+    if p.cnf.Public.KillLongQueryTime > 0 { // all versions support this flag
+        args = append(args, fmt.Sprintf("--kill-long-queries-timeout=%d", p.cnf.Public.KillLongQueryTime))
+    }
+    if p.cnf.Public.FtwrlWaitTimeout > 0 {
+        if strings.Compare(p.mysqlVersion, "005007000") >= 0 {
             args = append(args, fmt.Sprintf("--ftwrl-wait-timeout=%d", p.cnf.Public.FtwrlWaitTimeout))
-        }
-        if strings.Compare(p.mysqlVersion, "008000000") < 0 {
-            args = append(args, "--binlog-info=ON") // ver >=5.7 and ver < 8.0
+        } else { // 5.5, 5.6
+            args = append(args, fmt.Sprintf("--lock-wait-timeout=%d", p.cnf.Public.FtwrlWaitTimeout))
         }
     }
-    if strings.Compare(p.mysqlVersion, "005006000") > 0 {
-        if p.cnf.Public.KillLongQueryTime > 0 {
-            args = append(args, fmt.Sprintf("--kill-long-queries-timeout=%d", p.cnf.Public.KillLongQueryTime))
+    if p.cnf.Public.AcquireLockWaitTimeout > 0 {
+        if strings.Compare(p.mysqlVersion, "005007000") > 0 {
+            args = append(args, fmt.Sprintf("--lock-ddl-timeout=%d", p.cnf.Public.AcquireLockWaitTimeout))
+        }
+        if strings.Compare(p.mysqlVersion, "008000000") >= 0 {
+            args = append(args, fmt.Sprintf("--backup-lock-timeout=%d", p.cnf.Public.AcquireLockWaitTimeout))
         }
     }
@@ -133,18 +141,13 @@
         if p.isOfficial {
             args = append(args, "--skip-strict")
         }
-        if p.cnf.Public.AcquireLockWaitTimeout > 0 {
-            args = append(args, fmt.Sprintf("--backup-lock-timeout=%d", p.cnf.Public.AcquireLockWaitTimeout))
-        }
     } else { // xtrabackup_80 does not have these args and will report errors
         args = append(args, "--no-timestamp", "--lazy-backup-non-innodb", "--wait-last-flush=2")
         args = append(args, fmt.Sprintf("--ibbackup=%s", filepath.Join(p.dbbackupHome, p.innodbCmd.xtrabackupBin)))
     }
-    /*
-        if p.cnf.PhysicalBackup.ExtraOpt != "" {
-            args = append(args, p.cnf.PhysicalBackup.ExtraOpt)
-        }
-    */
+    if p.cnf.PhysicalBackup.ExtraOpt != "" {
+        args = append(args, p.cnf.PhysicalBackup.ExtraOpt)
+    }
 
     return args
 }
@@ -219,8 +222,9 @@ func (p *PhysicalDumper) Execute(enableTimeOut bool) error {
     err = cmd.Run()
     if err != nil {
-        errStrPrefix := fmt.Sprintf("tail 10 errors from %s", xtrabackupLogFile)
-        errStrDetail, _ := util.GrepLinesFromFile(xtrabackupLogFile, []string{"ERROR", "fatal"}, 10, false, true)
+        errStrPrefix := fmt.Sprintf("tail 5 errors from %s", xtrabackupLogFile)
+        errStrDetail, _ := util.GrepLinesFromFile(xtrabackupLogFile, []string{"ERROR", "fatal", "unknown"},
+            5, false, true)
         if len(errStrDetail) > 0 {
             logger.Log.Info(errStrPrefix)
             logger.Log.Error(errStrDetail)
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_privileges.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_privileges.go
index de4a6f6ace..e1f91ad01a 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_privileges.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_privileges.go
@@ -21,6 +21,7 @@ import (
     "github.com/pkg/errors"
 
     "dbm-services/common/go-pubpkg/cmutil"
+    "dbm-services/common/go-pubpkg/mysqlcomm"
     "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config"
     "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/dbareport"
     "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/logger"
@@ -74,8 +75,9 @@ func BackupGrant(cfg *config.Public) error {
     defer func() {
         _ = db.Close()
     }()
-
-    rows, err := db.Query("select user, host from mysql.user where user not in ('ADMIN','yw','dba_bak_all_sel')")
+    usersExclude := []string{"ADMIN", "yw", "dba_bak_all_sel", "mysql.infoschema", "mysql.session", "mysql.sys"}
+    rows, err := db.Query(fmt.Sprintf("select `user`, `host` from `mysql`.`user` where `user` not in (%s)",
+        mysqlcomm.UnsafeIn(usersExclude, "'")))
     if err != nil {
         logger.Log.Errorf("can't send query to MySQL server %v\n", err)
         return err
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/precheck/check_disk_space.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/precheck/check_disk_space.go
index a945b2c2a0..57a42cf469 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/precheck/check_disk_space.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/precheck/check_disk_space.go
@@ -53,9 +53,9 @@ func DeleteOldBackup(cnf *config.Public, expireDays int) error {
     for _, fi := range dir {
         fileMatchOld := fmt.Sprintf("%s_%s", hostName, cnf.MysqlHost)
-        filePrefix := fmt.Sprintf("%d_%d_%s", cnf.BkBizId, cnf.ClusterId, cnf.MysqlHost)
+        fileMatch := fmt.Sprintf("_%d_%s", cnf.ClusterId, cnf.MysqlHost) // match without BkBizId: it changes when a cluster is transferred to another business, and historical files must be deleted too
         if fi.ModTime().Compare(expireTime) <= 0 {
-            if strings.HasPrefix(fi.Name(), filePrefix) || strings.Contains(fi.Name(), fileMatchOld) {
+            if strings.Contains(fi.Name(), fileMatch) || strings.Contains(fi.Name(), fileMatchOld) {
                 fileName := filepath.Join(cnf.BackupDir, fi.Name())
                 if fi.Size() > 4*1024*1024*1024 {
                     logger.Log.Infof("remove old backup file %s limit %dMB/s ", fileName, cnf.IOLimitMBPerSec)
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/spider/spider_service.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/spider/spider_service.go
index 4b0c6c6e16..ab3b5bd027 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/spider/spider_service.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/spider/spider_service.go
@@ -156,7 +156,7 @@ func (b GlobalBackupModel) queryBackupTasks(retries int, db *sqlx.DB) (backupTas
     if err != nil {
         return nil, err
     }
-    logger.Log.Infof("queryBackupTasks port=%d, sqlStr:%s, sqlArgs:%v", b.Port, sqlStr, sqlArgs)
+    logger.Log.Infof("queryBackupTasks for port=%d, sqlStr:%s, sqlArgs:%v", b.Port, sqlStr, sqlArgs)
 
     if err = db.Select(&backupTasks, sqlStr, sqlArgs...); err != nil {
         logger.Log.Warnf("fail to queryBackupTasks: %s, retries %d", err.Error(), retries)
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/util/misc.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/util/misc.go
index 1db01b8cba..31b5d1ea6e 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/util/misc.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/util/misc.go
@@ -279,7 +279,7 @@ func GrepLinesFromFile(logFilePath string, keywords []string, linesRet int, sens
     var grepCommand []string
     lineNum := "-" + cast.ToString(linesRet)
     if len(keywords) > 0 {
-        grepExpr := strings.Join(keywords, "|")
+        grepExpr := "'" + strings.Join(keywords, "|") + "'" // quoted so that, when run through a shell, '|' is not parsed as a pipe
         if sensitive {
             grepCommand = append(grepCommand, "grep", "-E")
         } else {
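The quoting added here matters because the keyword alternation is ultimately handed to a shell. A minimal illustration (the log path is hypothetical):

```
grep -E 'ERROR|fatal|unknown' /tmp/xtrabackup.log | tail -5   # intended pipeline
grep -E ERROR|fatal|unknown /tmp/xtrabackup.log               # unquoted: the shell splits at '|' and tries to run 'fatal' as a command
```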