diff --git a/datasophon-api/src/main/java/com/datasophon/api/controller/HostInstallController.java b/datasophon-api/src/main/java/com/datasophon/api/controller/HostInstallController.java
index f5b82557..ba2aeef5 100644
--- a/datasophon-api/src/main/java/com/datasophon/api/controller/HostInstallController.java
+++ b/datasophon-api/src/main/java/com/datasophon/api/controller/HostInstallController.java
@@ -132,7 +132,7 @@ public Result generateHostAgentCommand(
}
/**
- * 启动 主机上服务启动
+ * 启动/停止 主机上的服务
* @param clusterHostIds
* @param commandType
* @return
diff --git a/datasophon-api/src/main/resources/assembly.xml b/datasophon-api/src/main/resources/assembly.xml
index c670c16a..c4007947 100644
--- a/datasophon-api/src/main/resources/assembly.xml
+++ b/datasophon-api/src/main/resources/assembly.xml
@@ -175,5 +175,14 @@
0755
0755
+
+ ${basedir}/../datasophon-init/
+
+ **/*.*
+
+ datasophon-init
+ 0755
+ unix
+
diff --git a/datasophon-api/src/main/resources/db/migration/1.1.0/R1.1.0.sql b/datasophon-api/src/main/resources/db/migration/1.1.0/R1.1.0.sql
index dd988beb..e69de29b 100644
--- a/datasophon-api/src/main/resources/db/migration/1.1.0/R1.1.0.sql
+++ b/datasophon-api/src/main/resources/db/migration/1.1.0/R1.1.0.sql
@@ -1,40 +0,0 @@
-DROP TABLE IF EXISTS `migration_history`;
-DROP TABLE IF EXISTS `t_ddh_access_token`;
-DROP TABLE IF EXISTS `t_ddh_alert_group`;
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_expression`;
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_group_map`;
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_history`;
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_quota`;
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_rule`;
-DROP TABLE IF EXISTS `t_ddh_cluster_group`;
-DROP TABLE IF EXISTS `t_ddh_cluster_host`;
-DROP TABLE IF EXISTS `t_ddh_cluster_info`;
-DROP TABLE IF EXISTS `t_ddh_cluster_node_label`;
-DROP TABLE IF EXISTS `t_ddh_cluster_queue_capacity`;
-DROP TABLE IF EXISTS `t_ddh_cluster_rack`;
-DROP TABLE IF EXISTS `t_ddh_cluster_role_user`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command_host`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command_host_command`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_dashboard`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_instance`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_instance_role_group`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_group_config`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_instance`;
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_instance_webuis`;
-DROP TABLE IF EXISTS `t_ddh_cluster_user`;
-DROP TABLE IF EXISTS `t_ddh_cluster_user_group`;
-DROP TABLE IF EXISTS `t_ddh_cluster_variable`;
-DROP TABLE IF EXISTS `t_ddh_cluster_yarn_queue`;
-DROP TABLE IF EXISTS `t_ddh_cluster_yarn_scheduler`;
-DROP TABLE IF EXISTS `t_ddh_cluster_zk`;
-DROP TABLE IF EXISTS `t_ddh_command`;
-DROP TABLE IF EXISTS `t_ddh_frame_info`;
-DROP TABLE IF EXISTS `t_ddh_frame_service`;
-DROP TABLE IF EXISTS `t_ddh_frame_service_role`;
-DROP TABLE IF EXISTS `t_ddh_install_step`;
-DROP TABLE IF EXISTS `t_ddh_notice_group`;
-DROP TABLE IF EXISTS `t_ddh_notice_group_user`;
-DROP TABLE IF EXISTS `t_ddh_role_info`;
-DROP TABLE IF EXISTS `t_ddh_session`;
-DROP TABLE IF EXISTS `t_ddh_user_info`;
diff --git a/datasophon-api/src/main/resources/db/migration/1.1.0/V1.1.0__DDL.sql b/datasophon-api/src/main/resources/db/migration/1.1.0/V1.1.0__DDL.sql
index 47c83b3a..7f32757a 100644
--- a/datasophon-api/src/main/resources/db/migration/1.1.0/V1.1.0__DDL.sql
+++ b/datasophon-api/src/main/resources/db/migration/1.1.0/V1.1.0__DDL.sql
@@ -4,7 +4,7 @@ SET FOREIGN_KEY_CHECKS = 0;
-- ----------------------------
-- Table structure for t_ddh_access_token
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_access_token`;
+
CREATE TABLE `t_ddh_access_token` (
`id` int(10) NOT NULL,
`user_id` int(10) DEFAULT NULL,
@@ -19,7 +19,7 @@ CREATE TABLE `t_ddh_access_token` (
-- ----------------------------
-- Table structure for t_ddh_alert_group
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_alert_group`;
+
CREATE TABLE `t_ddh_alert_group` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`alert_group_name` varchar(32) DEFAULT NULL COMMENT '告警组名称',
@@ -32,7 +32,7 @@ CREATE TABLE `t_ddh_alert_group` (
-- ----------------------------
-- Table structure for t_ddh_cluster_alert_expression
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_expression`;
+
CREATE TABLE `t_ddh_cluster_alert_expression` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增 ID',
`name` varchar(255) DEFAULT NULL COMMENT '指标名称',
@@ -50,7 +50,7 @@ CREATE TABLE `t_ddh_cluster_alert_expression` (
-- ----------------------------
-- Table structure for t_ddh_cluster_alert_group_map
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_group_map`;
+
CREATE TABLE `t_ddh_cluster_alert_group_map` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`cluster_id` int(10) DEFAULT NULL,
@@ -61,7 +61,7 @@ CREATE TABLE `t_ddh_cluster_alert_group_map` (
-- ----------------------------
-- Table structure for t_ddh_cluster_alert_history
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_history`;
+
CREATE TABLE `t_ddh_cluster_alert_history` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`alert_group_name` varchar(32) DEFAULT NULL COMMENT '告警组',
@@ -82,7 +82,7 @@ CREATE TABLE `t_ddh_cluster_alert_history` (
-- ----------------------------
-- Table structure for t_ddh_cluster_alert_quota
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_quota`;
+
CREATE TABLE `t_ddh_cluster_alert_quota` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`alert_quota_name` varchar(32) DEFAULT NULL COMMENT '告警指标名称',
@@ -106,7 +106,7 @@ CREATE TABLE `t_ddh_cluster_alert_quota` (
-- ----------------------------
-- Table structure for t_ddh_cluster_alert_rule
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_rule`;
+
CREATE TABLE `t_ddh_cluster_alert_rule` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增 ID',
`expression_id` bigint(20) NOT NULL COMMENT '表达式 ID',
@@ -130,7 +130,7 @@ CREATE TABLE `t_ddh_cluster_alert_rule` (
-- ----------------------------
-- Table structure for t_ddh_cluster_group
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_group`;
+
CREATE TABLE `t_ddh_cluster_group` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`group_name` varchar(255) DEFAULT NULL,
@@ -141,7 +141,7 @@ CREATE TABLE `t_ddh_cluster_group` (
-- ----------------------------
-- Table structure for t_ddh_cluster_host
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_host`;
+
CREATE TABLE `t_ddh_cluster_host` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
@@ -166,7 +166,7 @@ CREATE TABLE `t_ddh_cluster_host` (
-- ----------------------------
-- Table structure for t_ddh_cluster_info
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_info`;
+
CREATE TABLE `t_ddh_cluster_info` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`create_by` varchar(128) DEFAULT NULL COMMENT '创建人',
@@ -183,7 +183,7 @@ CREATE TABLE `t_ddh_cluster_info` (
-- ----------------------------
-- Table structure for t_ddh_cluster_node_label
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_node_label`;
+
CREATE TABLE `t_ddh_cluster_node_label` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`cluster_id` int(10) DEFAULT NULL,
@@ -194,7 +194,7 @@ CREATE TABLE `t_ddh_cluster_node_label` (
-- ----------------------------
-- Table structure for t_ddh_cluster_queue_capacity
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_queue_capacity`;
+
CREATE TABLE `t_ddh_cluster_queue_capacity` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`cluster_id` int(10) DEFAULT NULL,
@@ -210,7 +210,7 @@ CREATE TABLE `t_ddh_cluster_queue_capacity` (
-- ----------------------------
-- Table structure for t_ddh_cluster_rack
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_rack`;
+
CREATE TABLE `t_ddh_cluster_rack` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`rack` varchar(255) DEFAULT NULL,
@@ -221,7 +221,7 @@ CREATE TABLE `t_ddh_cluster_rack` (
-- ----------------------------
-- Table structure for t_ddh_cluster_role_user
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_role_user`;
+
CREATE TABLE `t_ddh_cluster_role_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`cluster_id` int(11) DEFAULT NULL COMMENT '集群id',
@@ -233,7 +233,7 @@ CREATE TABLE `t_ddh_cluster_role_user` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_command
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command`;
+
CREATE TABLE `t_ddh_cluster_service_command` (
`command_id` varchar(128) NOT NULL COMMENT '主键',
`create_by` varchar(32) DEFAULT NULL COMMENT '创建人',
@@ -253,7 +253,6 @@ CREATE TABLE `t_ddh_cluster_service_command` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_command_host
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command_host`;
CREATE TABLE `t_ddh_cluster_service_command_host` (
`command_host_id` varchar(128) NOT NULL DEFAULT '1' COMMENT '主键',
`hostname` varchar(32) DEFAULT NULL COMMENT '主机',
@@ -267,7 +266,7 @@ CREATE TABLE `t_ddh_cluster_service_command_host` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_command_host_command
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command_host_command`;
+
CREATE TABLE `t_ddh_cluster_service_command_host_command` (
`host_command_id` varchar(128) NOT NULL DEFAULT '1' COMMENT '主键',
`command_name` varchar(256) DEFAULT NULL COMMENT '指令名称',
@@ -288,7 +287,7 @@ CREATE TABLE `t_ddh_cluster_service_command_host_command` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_dashboard
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_dashboard`;
+
CREATE TABLE `t_ddh_cluster_service_dashboard` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主机',
`service_name` varchar(128) DEFAULT NULL COMMENT '服务名称',
@@ -299,7 +298,7 @@ CREATE TABLE `t_ddh_cluster_service_dashboard` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_instance
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_instance`;
+
CREATE TABLE `t_ddh_cluster_service_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`cluster_id` int(11) DEFAULT NULL COMMENT '集群id',
@@ -317,7 +316,7 @@ CREATE TABLE `t_ddh_cluster_service_instance` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_instance_role_group
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_instance_role_group`;
+
CREATE TABLE `t_ddh_cluster_service_instance_role_group` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`role_group_name` varchar(255) DEFAULT NULL,
@@ -332,7 +331,7 @@ CREATE TABLE `t_ddh_cluster_service_instance_role_group` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_role_group_config
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_group_config`;
+
CREATE TABLE `t_ddh_cluster_service_role_group_config` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`role_group_id` int(10) DEFAULT NULL,
@@ -351,7 +350,7 @@ CREATE TABLE `t_ddh_cluster_service_role_group_config` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_role_instance
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_instance`;
+
CREATE TABLE `t_ddh_cluster_service_role_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`service_role_name` varchar(32) DEFAULT NULL COMMENT '服务角色名称',
@@ -371,7 +370,7 @@ CREATE TABLE `t_ddh_cluster_service_role_instance` (
-- ----------------------------
-- Table structure for t_ddh_cluster_service_role_instance_webuis
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_instance_webuis`;
+
CREATE TABLE `t_ddh_cluster_service_role_instance_webuis` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`service_role_instance_id` int(10) DEFAULT NULL COMMENT '服务角色id',
@@ -385,7 +384,7 @@ CREATE TABLE `t_ddh_cluster_service_role_instance_webuis` (
-- ----------------------------
-- Table structure for t_ddh_cluster_user
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_user`;
+
CREATE TABLE `t_ddh_cluster_user` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`username` varchar(255) DEFAULT NULL,
@@ -396,7 +395,7 @@ CREATE TABLE `t_ddh_cluster_user` (
-- ----------------------------
-- Table structure for t_ddh_cluster_user_group
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_user_group`;
+
CREATE TABLE `t_ddh_cluster_user_group` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`user_id` int(10) DEFAULT NULL,
@@ -409,7 +408,7 @@ CREATE TABLE `t_ddh_cluster_user_group` (
-- ----------------------------
-- Table structure for t_ddh_cluster_variable
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_variable`;
+
CREATE TABLE `t_ddh_cluster_variable` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`cluster_id` int(10) DEFAULT NULL,
@@ -422,7 +421,7 @@ CREATE TABLE `t_ddh_cluster_variable` (
-- ----------------------------
-- Table structure for t_ddh_cluster_yarn_queue
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_yarn_queue`;
+
CREATE TABLE `t_ddh_cluster_yarn_queue` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`queue_name` varchar(255) DEFAULT NULL,
@@ -443,7 +442,7 @@ CREATE TABLE `t_ddh_cluster_yarn_queue` (
-- ----------------------------
-- Table structure for t_ddh_cluster_yarn_scheduler
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_yarn_scheduler`;
+
CREATE TABLE `t_ddh_cluster_yarn_scheduler` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`cluster_id` int(11) DEFAULT NULL,
@@ -455,7 +454,7 @@ CREATE TABLE `t_ddh_cluster_yarn_scheduler` (
-- ----------------------------
-- Table structure for t_ddh_cluster_zk
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_zk`;
+
CREATE TABLE `t_ddh_cluster_zk` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`zk_server` varchar(255) DEFAULT NULL,
@@ -467,7 +466,7 @@ CREATE TABLE `t_ddh_cluster_zk` (
-- ----------------------------
-- Table structure for t_ddh_command
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_command`;
+
CREATE TABLE `t_ddh_command` (
`id` int(10) NOT NULL,
`command_type` int(2) DEFAULT NULL,
@@ -477,7 +476,7 @@ CREATE TABLE `t_ddh_command` (
-- ----------------------------
-- Table structure for t_ddh_frame_info
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_frame_info`;
+
CREATE TABLE `t_ddh_frame_info` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`frame_name` varchar(128) DEFAULT NULL COMMENT '框架名称',
@@ -490,7 +489,7 @@ CREATE TABLE `t_ddh_frame_info` (
-- ----------------------------
-- Table structure for t_ddh_frame_service
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_frame_service`;
+
CREATE TABLE `t_ddh_frame_service` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`frame_id` int(11) DEFAULT NULL COMMENT '版本id',
@@ -515,7 +514,7 @@ CREATE TABLE `t_ddh_frame_service` (
-- ----------------------------
-- Table structure for t_ddh_frame_service_role
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_frame_service_role`;
+
CREATE TABLE `t_ddh_frame_service_role` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`service_id` int(11) DEFAULT NULL COMMENT '服务id',
@@ -534,7 +533,7 @@ CREATE TABLE `t_ddh_frame_service_role` (
-- ----------------------------
-- Table structure for t_ddh_install_step
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_install_step`;
+
CREATE TABLE `t_ddh_install_step` (
`id` int(10) NOT NULL AUTO_INCREMENT,
`step_name` varchar(128) DEFAULT NULL,
@@ -547,7 +546,7 @@ CREATE TABLE `t_ddh_install_step` (
-- ----------------------------
-- Table structure for t_ddh_notice_group
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_notice_group`;
+
CREATE TABLE `t_ddh_notice_group` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`notice_group_name` varchar(32) DEFAULT NULL COMMENT '通知组名称',
@@ -559,7 +558,7 @@ CREATE TABLE `t_ddh_notice_group` (
-- ----------------------------
-- Table structure for t_ddh_notice_group_user
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_notice_group_user`;
+
CREATE TABLE `t_ddh_notice_group_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`notice_group_id` int(11) DEFAULT NULL COMMENT '通知组id',
@@ -571,7 +570,7 @@ CREATE TABLE `t_ddh_notice_group_user` (
-- ----------------------------
-- Table structure for t_ddh_role_info
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_role_info`;
+
CREATE TABLE `t_ddh_role_info` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`role_name` varchar(128) DEFAULT NULL COMMENT '角色名称',
@@ -583,7 +582,7 @@ CREATE TABLE `t_ddh_role_info` (
-- ----------------------------
-- Table structure for t_ddh_session
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_session`;
+
CREATE TABLE `t_ddh_session` (
`id` varchar(128) NOT NULL,
`user_id` int(10) DEFAULT NULL,
@@ -597,7 +596,7 @@ CREATE TABLE `t_ddh_session` (
-- ----------------------------
-- Table structure for t_ddh_user_info
-- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_user_info`;
+
CREATE TABLE `t_ddh_user_info` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`username` varchar(128) DEFAULT NULL COMMENT '用户名',
diff --git a/datasophon-api/src/main/resources/db/migration/1.1.3/V1.1.3__DML.sql b/datasophon-api/src/main/resources/db/migration/1.1.3/V1.1.3__DML.sql
deleted file mode 100644
index e69de29b..00000000
diff --git a/datasophon-api/src/main/resources/db/migration/1.1.3/R1.1.3.sql b/datasophon-api/src/main/resources/db/migration/1.2.0/R1.2.0.sql
similarity index 100%
rename from datasophon-api/src/main/resources/db/migration/1.1.3/R1.1.3.sql
rename to datasophon-api/src/main/resources/db/migration/1.2.0/R1.2.0.sql
diff --git a/datasophon-api/src/main/resources/db/migration/1.1.3/V1.1.3__DDL.sql b/datasophon-api/src/main/resources/db/migration/1.2.0/V1.2.0__DDL.sql
similarity index 100%
rename from datasophon-api/src/main/resources/db/migration/1.1.3/V1.1.3__DDL.sql
rename to datasophon-api/src/main/resources/db/migration/1.2.0/V1.2.0__DDL.sql
diff --git a/datasophon-api/src/main/resources/db/migration/1.2.0/V1.2.0__DML.sql b/datasophon-api/src/main/resources/db/migration/1.2.0/V1.2.0__DML.sql
new file mode 100644
index 00000000..48ef0587
--- /dev/null
+++ b/datasophon-api/src/main/resources/db/migration/1.2.0/V1.2.0__DML.sql
@@ -0,0 +1 @@
+INSERT INTO `t_ddh_cluster_alert_quota` (`id`, `alert_quota_name`, `service_category`, `alert_expr`, `alert_level`, `alert_group_id`, `notice_group_id`, `alert_advice`, `compare_method`, `alert_threshold`, `alert_tactic`, `interval_duration`, `trigger_duration`, `service_role_name`, `quota_state`, `create_time`) VALUES (632, 'DorisFEObserver进程存活', 'DORIS', 'up{group=\"fe\",job=\"doris\"}', 2, 23, 1, '重新启动', '!=', 1, 1, 1, 15, 'DorisFEObserver', 1, '2023-09-27 11:30:55');
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/ALERTMANAGER/control.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ALERTMANAGER/control.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/ALERTMANAGER/control.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/ALERTMANAGER/control.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/ALERTMANAGER/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ALERTMANAGER/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/ALERTMANAGER/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/ALERTMANAGER/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/DORIS/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/DORIS/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/DORIS/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/DORIS/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/DORIS/status_be.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/DORIS/status_be.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/DORIS/status_be.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/DORIS/status_be.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/DORIS/status_fe.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/DORIS/status_fe.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/DORIS/status_fe.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/DORIS/status_fe.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/DS/control_ds.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/DS/control_ds.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/DS/control_ds.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/DS/control_ds.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/DS/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/DS/service_ddl.json
similarity index 99%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/DS/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/DS/service_ddl.json
index cd3cb21f..a82de295 100644
--- a/datasophon-api/src/main/resources/meta/DDP-1.1.3/DS/service_ddl.json
+++ b/datasophon-api/src/main/resources/meta/DDP-1.2.0/DS/service_ddl.json
@@ -2,11 +2,11 @@
"name": "DS",
"label": "DolphinScheduler",
"description": "分布式易扩展的可视化工作流任务调度平台",
- "version": "3.1.1",
+ "version": "3.1.8",
"sortNum": 14,
"dependencies":["ZOOKEEPER"],
- "packageName": "dolphinscheduler-3.1.1.tar.gz",
- "decompressPackageName": "dolphinscheduler-3.1.1",
+ "packageName": "dolphinscheduler-3.1.8.tar.gz",
+ "decompressPackageName": "dolphinscheduler-3.1.8",
"roles": [
{
"name": "ApiServer",
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/ELASTICSEARCH/control_es.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ELASTICSEARCH/control_es.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/ELASTICSEARCH/control_es.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/ELASTICSEARCH/control_es.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/ELASTICSEARCH/control_es_exporter.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ELASTICSEARCH/control_es_exporter.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/ELASTICSEARCH/control_es_exporter.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/ELASTICSEARCH/control_es_exporter.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/ELASTICSEARCH/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ELASTICSEARCH/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/ELASTICSEARCH/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/ELASTICSEARCH/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/FLINK/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/FLINK/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/FLINK/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/FLINK/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/GRAFANA/control.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/GRAFANA/control.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/GRAFANA/control.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/GRAFANA/control.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/GRAFANA/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/GRAFANA/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/GRAFANA/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/GRAFANA/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/HBASE/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HBASE/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/HBASE/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/HBASE/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/HDFS/control_hadoop.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/control_hadoop.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/HDFS/control_hadoop.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/control_hadoop.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/HDFS/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/service_ddl.json
similarity index 96%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/HDFS/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/service_ddl.json
index 7867bbff..758d88f2 100644
--- a/datasophon-api/src/main/resources/meta/DDP-1.1.3/HDFS/service_ddl.json
+++ b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/service_ddl.json
@@ -193,6 +193,9 @@
"hadoop.proxyuser.hive.hosts",
"hadoop.proxyuser.hive.groups",
"hadoop.proxyuser.hive.users",
+ "hadoop.proxyuser.kyuubi.hosts",
+ "hadoop.proxyuser.kyuubi.groups",
+ "hadoop.proxyuser.kyuubi.users",
"hadoop.http.staticuser.user",
"ha.zookeeper.quorum",
"hadoop.tmp.dir",
@@ -344,6 +347,39 @@
"hidden": false,
"defaultValue": "*"
},
+ {
+ "name": "hadoop.proxyuser.kyuubi.hosts",
+ "label": "允许通过代理访问的主机节点",
+ "description": "配置kyuubi允许通过代理访问的主机节点",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "*"
+ },
+ {
+ "name": "hadoop.proxyuser.kyuubi.groups",
+ "label": "允许通过代理用户所属组",
+ "description": "配置kyuubi允许通过代理用户所属组",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "*"
+ },
+ {
+ "name": "hadoop.proxyuser.kyuubi.users",
+ "label": "允许通过代理的用户",
+ "description": "配置kyuubi允许通过代理的用户",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "*"
+ },
{
"name": "dfs.replication",
"label": "BLOCK副本数",
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/HIVE/control_hive.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HIVE/control_hive.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/HIVE/control_hive.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/HIVE/control_hive.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/HIVE/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HIVE/service_ddl.json
similarity index 99%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/HIVE/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/HIVE/service_ddl.json
index b71dd027..9d9b825e 100644
--- a/datasophon-api/src/main/resources/meta/DDP-1.1.3/HIVE/service_ddl.json
+++ b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HIVE/service_ddl.json
@@ -371,7 +371,7 @@
},
{
"name": "hive.exec.submit.local.task.via.child",
- "label": "地任务是否单独启动进程",
+ "label": "本地任务是否单独启动进程",
"description": "决定本地任务(通常是mapjoin的哈希表生成阶段)是否在单独的JVM中运行",
"required": true,
"configType": "map",
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.2.0/ICEBERG/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ICEBERG/service_ddl.json
new file mode 100644
index 00000000..aba6d51a
--- /dev/null
+++ b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ICEBERG/service_ddl.json
@@ -0,0 +1,68 @@
+{
+ "name": "ICEBERG",
+ "label": "Iceberg",
+ "description": "适用于庞大数据集分析的开放式数据表格式",
+ "version": "1.4.0",
+ "sortNum": 31,
+ "dependencies":[],
+ "packageName": "iceberg-1.4.0.tar.gz",
+ "decompressPackageName": "iceberg-1.4.0",
+ "roles": [
+ {
+ "name": "IcebergClient",
+ "label": "IcebergClient",
+ "roleType": "client",
+ "cardinality": "1+",
+ "logFile": "iceberg.log"
+ }
+ ],
+ "configWriter": {
+ "generators": [
+ {
+ "filename": "config.properties",
+ "configFormat": "properties",
+ "outputDirectory": "",
+ "includeParams": [
+ "hiveEnableIceberg",
+ "spark3EnableIceberg",
+ "flinkEnableIceberg"
+ ]
+ }
+ ]
+ },
+ "parameters": [
+ {
+ "name": "hiveEnableIceberg",
+ "label": "hive集成iceberg",
+ "description": "hive集成iceberg",
+ "required": true,
+ "type": "switch",
+ "value": true,
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": true
+ },
+ {
+ "name": "spark3EnableIceberg",
+ "label": "spark3集成iceberg",
+ "description": "spark3集成iceberg",
+ "required": true,
+ "type": "switch",
+ "value": true,
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": true
+ },
+ {
+ "name": "flinkEnableIceberg",
+ "label": "flink集成iceberg",
+ "description": "flink集成iceberg",
+ "required": true,
+ "type": "switch",
+ "value": true,
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": true
+ }
+ ]
+}
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/KAFKA/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/KAFKA/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/KAFKA/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/KAFKA/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/KERBEROS/kerberos.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/KERBEROS/kerberos.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/KERBEROS/kerberos.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/KERBEROS/kerberos.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/KERBEROS/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/KERBEROS/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/KERBEROS/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/KERBEROS/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.2.0/KYUUBI/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/KYUUBI/service_ddl.json
new file mode 100644
index 00000000..3009cde5
--- /dev/null
+++ b/datasophon-api/src/main/resources/meta/DDP-1.2.0/KYUUBI/service_ddl.json
@@ -0,0 +1,354 @@
+{
+ "name": "KYUUBI",
+ "label": "Kyuubi",
+ "description": "统一多租户JDBC网关",
+ "version": "1.7.3",
+ "sortNum": 30,
+ "dependencies":[],
+ "packageName": "kyuubi-1.7.3.tar.gz",
+ "decompressPackageName": "kyuubi-1.7.3",
+ "roles": [
+ {
+ "name": "KyuubiServer",
+ "label": "KyuubiServer",
+ "roleType": "master",
+ "runAs": {
+ "user": "kyuubi",
+ "group": "hadoop"
+ },
+ "cardinality": "1+",
+ "logFile": "logs/kyuubi-server-${host}.out",
+ "startRunner": {
+ "timeout": "60",
+ "program": "bin/kyuubi",
+ "args": [
+ "start"
+ ]
+ },
+ "stopRunner": {
+ "timeout": "600",
+ "program": "bin/kyuubi",
+ "args": [
+ "stop"
+ ]
+ },
+ "statusRunner": {
+ "timeout": "60",
+ "program": "bin/kyuubi",
+ "args": [
+ "status"
+ ]
+ },
+ "restartRunner": {
+ "timeout": "60",
+ "program": "bin/kyuubi",
+ "args": [
+ "restart"
+ ]
+ }
+ },
+ {
+ "name": "KyuubiClient",
+ "label": "KyuubiClient",
+ "roleType": "client",
+ "cardinality": "1+",
+ "logFile": "",
+ "runAs": {
+ "user": "kyuubi",
+ "group": "hadoop"
+ }
+ }
+ ],
+ "configWriter": {
+ "generators": [
+ {
+ "filename": "kyuubi-defaults.conf",
+ "configFormat": "properties2",
+ "outputDirectory": "conf",
+ "includeParams": [
+ "kyuubi.ha.zookeeper.namespace",
+ "kyuubi.ha.zookeeper.quorum",
+ "kyuubi.session.idle.timeout",
+ "kyuubi.session.engine.idle.timeout",
+ "kyuubi.session.engine.initialize.timeout",
+ "spark.master",
+ "kyuubi.metrics.reporters",
+ "kyuubi.metrics.prometheus.port",
+ "kyuubi.session.engine.spark.showProgress",
+ "kyuubi.metrics.enabled",
+ "enableKerberos",
+ "kyuubi.kinit.principal",
+ "kyuubi.kinit.keytab",
+ "kyuubi.authentication",
+ "custom.kyuubi-defaults.conf"
+ ]
+ },
+ {
+ "filename": "kyuubi-env.sh",
+ "configFormat": "custom",
+ "outputDirectory": "conf",
+ "templateName": "kyuubi-env.ftl",
+ "includeParams": [
+ "javaHome",
+ "sparkHome",
+ "hadoopConfDir",
+ "kyuubiServerHeapSize",
+ "kyuubiClientHeapSize",
+ "custom.kyuubi-env.sh"
+ ]
+ }
+ ]
+ },
+ "parameters": [
+ {
+ "name": "kyuubi.ha.zookeeper.quorum",
+ "label": "zookeeper服务信息",
+ "description": "zookeeper服务信息",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "kyuubi.ha.zookeeper.namespace",
+ "label": "zookeeper目录",
+ "description": "zookeeper目录",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "kyuubi.session.idle.timeout",
+ "label": "会话超时时间",
+ "description": "会话超时时间",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "PT6H"
+ },
+ {
+ "name": "kyuubi.session.engine.idle.timeout",
+ "label": "引擎超时时间",
+ "description": "引擎超时时间",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "PT30M"
+ },
+ {
+ "name": "spark.master",
+ "label": "配置spark为onYarn模式",
+ "description": "配置spark为onYarn模式",
+ "required": true,
+ "type": "input",
+ "value": "yarn",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "yarn"
+ },
+ {
+ "name": "kyuubi.metrics.reporters",
+ "label": "监控输出格式",
+ "description": "监控输出格式",
+ "required": true,
+ "type": "input",
+ "value": "PROMETHEUS",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "PROMETHEUS"
+ },
+ {
+ "name": "kyuubi.metrics.prometheus.port",
+ "label": "监控服务端口",
+ "description": "监控服务端口",
+ "required": true,
+ "type": "input",
+ "value": "10019",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "10019"
+ },
+ {
+ "name": "kyuubi.session.engine.initialize.timeout",
+ "label": "引擎启动超时时间",
+ "description": "引擎启动超时时间",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "PT3M"
+ },
+ {
+ "name": "kyuubi.session.engine.spark.showProgress",
+ "label": "spark任务进度显示",
+ "description": "spark任务进度显示",
+ "required": true,
+ "type": "switch",
+ "value": false,
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": false
+ },
+ {
+ "name": "kyuubi.metrics.enabled",
+ "label": "服务监控指标",
+ "description": "服务监控指标",
+ "required": true,
+ "type": "switch",
+ "value": true,
+ "configurableInWizard": false,
+ "hidden": false,
+ "defaultValue": true
+ },
+ {
+ "name": "javaHome",
+ "label": "java安装路径",
+ "description": "java安装路径",
+ "configType": "map",
+ "required": true,
+ "type": "input",
+ "value": "/usr/local/jdk1.8.0_333",
+ "configurableInWizard": false,
+ "hidden": false,
+ "defaultValue": "/usr/local/jdk1.8.0_333"
+ },
+ {
+ "name": "sparkHome",
+ "label": "spark安装目录",
+ "description": "spark安装目录",
+ "configType": "map",
+ "required": true,
+ "type": "input",
+ "value": "/opt/datasophon/spark-3.1.3/",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "/opt/datasophon/spark-3.1.3/"
+ },
+ {
+ "name": "hadoopConfDir",
+ "label": "hadoop配置目录",
+ "description": "hadoop配置目录",
+ "configType": "map",
+ "required": true,
+ "type": "input",
+ "value": "/opt/datasophon/hadoop-3.3.3/etc/hadoop",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "/opt/datasophon/hadoop-3.3.3/etc/hadoop"
+ },
+ {
+ "name": "kyuubiServerHeapSize",
+ "label": "KyuubiServer JVM内存",
+ "description": "KyuubiServer JVM内存",
+ "configType": "map",
+ "required": true,
+ "minValue": 0,
+ "maxValue": 32,
+ "type": "slider",
+ "value": "",
+ "unit": "GB",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "4"
+ },
+ {
+ "name": "kyuubiClientHeapSize",
+ "label": "KyuubiClient JVM内存",
+ "description": "KyuubiClient JVM内存",
+ "configType": "map",
+ "required": true,
+ "minValue": 0,
+ "maxValue": 32,
+ "type": "slider",
+ "value": "",
+ "unit": "GB",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "2"
+ },
+ {
+ "name": "enableKerberos",
+ "label": "开启Kerberos认证",
+ "description": "开启Kerberos认证",
+ "required": false,
+ "type": "switch",
+ "value": false,
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": false
+ },
+ {
+ "name": "kyuubi.authentication",
+ "label": "Kyuubi服务认证方式",
+ "description": "",
+ "configWithKerberos": true,
+ "required": false,
+ "configType": "kb",
+ "type": "input",
+ "value": "KERBEROS",
+ "configurableInWizard": true,
+ "hidden": true,
+ "defaultValue": "KERBEROS"
+ },
+ {
+ "name": "kyuubi.kinit.principal",
+ "label": "Kyuubi服务的Kerberos主体",
+ "description": "",
+ "configWithKerberos": true,
+ "required": false,
+ "configType": "kb",
+ "type": "input",
+ "value": "kyuubi/${host}@${realm}",
+ "configurableInWizard": true,
+ "hidden": true,
+ "defaultValue": "kyuubi/${host}@${realm}"
+ },
+ {
+ "name": "kyuubi.kinit.keytab",
+ "label": "Kyuubi服务的Kerberos密钥文件路径",
+ "description": "",
+ "configWithKerberos": true,
+ "required": false,
+ "configType": "kb",
+ "type": "input",
+ "value": "/etc/security/keytab/kyuubi.service.keytab",
+ "configurableInWizard": true,
+ "hidden": true,
+ "defaultValue": "/etc/security/keytab/kyuubi.service.keytab"
+ },
+ {
+ "name": "custom.kyuubi-env.sh",
+ "label": "自定义配置kyuubi-env.sh",
+ "description": "自定义配置",
+ "configType": "custom",
+ "required": false,
+ "type": "multipleWithKey",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "custom.kyuubi-defaults.conf",
+ "label": "自定义配置kyuubi-defaults.conf",
+ "description": "自定义配置",
+ "configType": "custom",
+ "required": false,
+ "type": "multipleWithKey",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ }
+ ]
+}
\ No newline at end of file
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/PROMETHEUS/control.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/PROMETHEUS/control.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/PROMETHEUS/control.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/PROMETHEUS/control.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/PROMETHEUS/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/PROMETHEUS/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/PROMETHEUS/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/PROMETHEUS/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/RANGER/ranger_admin.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/RANGER/ranger_admin.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/RANGER/ranger_admin.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/RANGER/ranger_admin.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/RANGER/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/RANGER/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/RANGER/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/RANGER/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/SPARK3/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/SPARK3/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/SPARK3/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/SPARK3/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/SPARK3/status-master.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/SPARK3/status-master.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/SPARK3/status-master.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/SPARK3/status-master.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/SPARK3/status-slave.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/SPARK3/status-slave.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/SPARK3/status-slave.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/SPARK3/status-slave.sh
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/STREAMPARK/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/STREAMPARK/service_ddl.json
similarity index 73%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/STREAMPARK/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/STREAMPARK/service_ddl.json
index 15b8db1a..1e082146 100644
--- a/datasophon-api/src/main/resources/meta/DDP-1.1.3/STREAMPARK/service_ddl.json
+++ b/datasophon-api/src/main/resources/meta/DDP-1.2.0/STREAMPARK/service_ddl.json
@@ -2,18 +2,18 @@
"name": "STREAMPARK",
"label": "StreamPark",
"description": "流处理极速开发框架,流批一体&湖仓一体的云原生平台,一站式流处理计算平台",
- "version": "1.2.3",
+ "version": "2.1.1",
"sortNum": 13,
"dependencies":[],
- "packageName": "streampark-1.2.3.tar.gz",
- "decompressPackageName": "streampark-1.2.3",
+ "packageName": "streampark-2.1.1.tar.gz",
+ "decompressPackageName": "streampark-2.1.1",
"roles": [
{
"name": "StreamPark",
"label": "StreamPark",
"roleType": "master",
"cardinality": "1",
- "logFile": "logs/streamx.out",
+ "logFile": "logs/streampark.out",
"jmxPort": 10086,
"startRunner": {
"timeout": "60",
@@ -29,17 +29,16 @@
},
"statusRunner": {
"timeout": "60",
- "program": "bin/streamx.sh",
+ "program": "bin/streampark.sh",
"args": [
"status"
]
},
"restartRunner": {
"timeout": "60",
- "program": "control.sh",
+ "program": "bin/streampark.sh",
"args": [
- "restart",
- "api-server"
+ "restart"
]
},
"externalLink": {
@@ -61,7 +60,9 @@
"username",
"password",
"serverPort",
- "hadoopUserName"
+ "hadoopUserName",
+ "workspaceLocal",
+ "workspaceRemote"
]
}
]
@@ -126,6 +127,30 @@
"configurableInWizard": true,
"hidden": false,
"defaultValue": "root"
+ },
+ {
+ "name": "workspaceLocal",
+ "label": "StreamPark本地工作空间目录",
+ "description": "自行创建,用于存放项目源码,构建的目录等",
+ "configType": "map",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "/data/streampark/workspace"
+ },
+ {
+ "name": "workspaceRemote",
+ "label": "StreamPark HDFS工作空间目录",
+ "description": "HDFS工作空间目录",
+ "configType": "map",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "hdfs://nameservice1/streampark"
}
]
}
\ No newline at end of file
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/TRINO/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/TRINO/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/TRINO/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/TRINO/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/YARN/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/YARN/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/YARN/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/YARN/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/ZOOKEEPER/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ZOOKEEPER/service_ddl.json
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/ZOOKEEPER/service_ddl.json
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/ZOOKEEPER/service_ddl.json
diff --git a/datasophon-api/src/main/resources/meta/DDP-1.1.3/ZOOKEEPER/zkStatus.sh b/datasophon-api/src/main/resources/meta/DDP-1.2.0/ZOOKEEPER/zkStatus.sh
similarity index 100%
rename from datasophon-api/src/main/resources/meta/DDP-1.1.3/ZOOKEEPER/zkStatus.sh
rename to datasophon-api/src/main/resources/meta/DDP-1.2.0/ZOOKEEPER/zkStatus.sh
diff --git a/datasophon-common/src/main/java/com/datasophon/common/model/WorkerServiceMessage.java b/datasophon-common/src/main/java/com/datasophon/common/model/WorkerServiceMessage.java
index 09243b5a..04e9406d 100644
--- a/datasophon-common/src/main/java/com/datasophon/common/model/WorkerServiceMessage.java
+++ b/datasophon-common/src/main/java/com/datasophon/common/model/WorkerServiceMessage.java
@@ -17,6 +17,7 @@
package com.datasophon.common.model;
+import com.datasophon.common.enums.CommandType;
import lombok.Data;
import java.io.Serializable;
@@ -44,11 +45,14 @@ public class WorkerServiceMessage implements Serializable {
*/
private String ip;
+ private CommandType commandType;
+
public WorkerServiceMessage() {
}
- public WorkerServiceMessage(String hostname, Integer clusterId) {
+ public WorkerServiceMessage(String hostname, Integer clusterId, CommandType commandType) {
this.hostname = hostname;
this.clusterId = clusterId;
+ this.commandType = commandType;
}
}
diff --git a/datasophon-infrastructure/src/main/java/com/datasophon/dao/mapper/ClusterServiceRoleInstanceMapper.java b/datasophon-infrastructure/src/main/java/com/datasophon/dao/mapper/ClusterServiceRoleInstanceMapper.java
index d3de76f1..1adfe5f1 100644
--- a/datasophon-infrastructure/src/main/java/com/datasophon/dao/mapper/ClusterServiceRoleInstanceMapper.java
+++ b/datasophon-infrastructure/src/main/java/com/datasophon/dao/mapper/ClusterServiceRoleInstanceMapper.java
@@ -35,4 +35,6 @@
public interface ClusterServiceRoleInstanceMapper extends BaseMapper {
void updateToNeedRestart(@Param("roleGroupId") Integer roleGroupId);
+
+ void updateToNeedRestartByHost(@Param("hostName") String hostName);
}
diff --git a/datasophon-infrastructure/src/main/resources/mapper/ClusterServiceRoleInstanceMapper.xml b/datasophon-infrastructure/src/main/resources/mapper/ClusterServiceRoleInstanceMapper.xml
index 72effecb..614c7fde 100644
--- a/datasophon-infrastructure/src/main/resources/mapper/ClusterServiceRoleInstanceMapper.xml
+++ b/datasophon-infrastructure/src/main/resources/mapper/ClusterServiceRoleInstanceMapper.xml
@@ -26,4 +26,10 @@
where role_group_id = #{roleGroupId}
+
+ update t_ddh_cluster_service_role_instance
+ set need_restart = 2
+ where hostname = #{hostName}
+
+
\ No newline at end of file
diff --git a/datasophon-init/bin/init-mysql-datasophon.sh b/datasophon-init/bin/init-mysql-datasophon.sh
index d6d8609c..c1cc2fb3 100644
--- a/datasophon-init/bin/init-mysql-datasophon.sh
+++ b/datasophon-init/bin/init-mysql-datasophon.sh
@@ -38,12 +38,6 @@ EOF
echo " init user datasophon finished."
echo " init database datasophon finished."
- mysql -udatasophon -p'datasophon' < <',
- `alert_threshold` bigint(200) DEFAULT NULL COMMENT '告警阀值',
- `alert_tactic` int(11) DEFAULT NULL COMMENT '告警策略 1:单次2:连续',
- `interval_duration` int(11) DEFAULT NULL COMMENT '间隔时长 单位分钟',
- `trigger_duration` int(11) DEFAULT NULL COMMENT '触发时长 单位秒',
- `service_role_name` varchar(255) DEFAULT NULL COMMENT '服务角色名称',
- `quota_state` int(2) DEFAULT NULL COMMENT '1: 启用 2:未启用',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 628 DEFAULT CHARSET=utf8mb4 COMMENT = '集群告警指标表 ' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_alert_rule
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_alert_rule`;
-CREATE TABLE `t_ddh_cluster_alert_rule` (
- `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增 ID',
- `expression_id` bigint(20) NOT NULL COMMENT '表达式 ID',
- `is_predefined` varchar(255) DEFAULT NULL COMMENT '是否预定义',
- `compare_method` varchar(255) NOT NULL COMMENT '比较方式 如 大于 小于 等于 等',
- `threshold_value` varchar(255) NOT NULL COMMENT '阈值',
- `persistent_time` bigint(20) NOT NULL COMMENT '持续时长',
- `strategy` varchar(255) NOT NULL COMMENT '告警策略:单次,连续',
- `repeat_interval` bigint(11) DEFAULT NULL COMMENT '连续告警时 间隔时长',
- `alert_level` varchar(255) NOT NULL COMMENT '告警级别',
- `alert_desc` varchar(4096) NOT NULL COMMENT '告警描述',
- `receiver_group_id` bigint(20) DEFAULT NULL COMMENT '接收组 ID',
- `state` varchar(255) NOT NULL COMMENT '状态',
- `is_delete` varchar(255) DEFAULT NULL COMMENT '是否删除',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- `update_time` datetime DEFAULT NULL COMMENT '修改时间',
- `cluster_id` int(10) DEFAULT NULL COMMENT '集群id',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 134002 DEFAULT CHARSET=utf8mb4 COMMENT = '规则表' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_group
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_group`;
-CREATE TABLE `t_ddh_cluster_group` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `group_name` varchar(255) DEFAULT NULL,
- `cluster_id` int(10) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_host
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_host`;
-CREATE TABLE `t_ddh_cluster_host` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- `hostname` varchar(32) DEFAULT NULL COMMENT '主机名',
- `ip` varchar(32) DEFAULT NULL COMMENT 'IP',
- `rack` varchar(32) DEFAULT NULL COMMENT '机架',
- `core_num` int(11) DEFAULT NULL COMMENT '核数',
- `total_mem` int(11) DEFAULT NULL COMMENT '总内存',
- `total_disk` int(11) DEFAULT NULL COMMENT '总磁盘',
- `used_mem` int(11) DEFAULT NULL COMMENT '已用内存',
- `used_disk` int(11) DEFAULT NULL COMMENT '已用磁盘',
- `average_load` varchar(32) DEFAULT NULL COMMENT '平均负载',
- `check_time` datetime DEFAULT NULL COMMENT '检测时间',
- `cluster_id` varchar(32) DEFAULT NULL COMMENT '集群id',
- `host_state` int(2) DEFAULT NULL COMMENT '1:健康 2、有一个角色异常3、有多个角色异常',
- `managed` int(2) DEFAULT NULL COMMENT '1:受管 2:断线',
- `cpu_architecture` varchar(255) DEFAULT NULL COMMENT 'cpu架构',
- `node_label` varchar(255) DEFAULT NULL COMMENT '节点标签',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '集群主机表 ' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_info
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_info`;
-CREATE TABLE `t_ddh_cluster_info` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `create_by` varchar(128) DEFAULT NULL COMMENT '创建人',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- `cluster_name` varchar(128) DEFAULT NULL COMMENT '集群名称',
- `cluster_code` varchar(128) DEFAULT NULL COMMENT '集群编码',
- `cluster_frame` varchar(128) DEFAULT NULL COMMENT '集群框架',
- `frame_version` varchar(128) DEFAULT NULL COMMENT '集群版本',
- `cluster_state` int(11) DEFAULT NULL COMMENT '集群状态 1:待配置2:正在运行',
- `frame_id` int(10) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '集群信息表' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_node_label
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_node_label`;
-CREATE TABLE `t_ddh_cluster_node_label` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `cluster_id` int(10) DEFAULT NULL,
- `node_label` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_queue_capacity
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_queue_capacity`;
-CREATE TABLE `t_ddh_cluster_queue_capacity` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `cluster_id` int(10) DEFAULT NULL,
- `queue_name` varchar(255) DEFAULT NULL,
- `capacity` varchar(255) DEFAULT NULL,
- `node_label` varchar(255) DEFAULT NULL,
- `acl_users` varchar(255) DEFAULT NULL,
- `parent` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_cluster_rack
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_rack`;
-CREATE TABLE `t_ddh_cluster_rack` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `rack` varchar(255) DEFAULT NULL,
- `cluster_id` int(10) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_role_user
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_role_user`;
-CREATE TABLE `t_ddh_cluster_role_user` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `cluster_id` int(11) DEFAULT NULL COMMENT '集群id',
- `user_type` int(2) DEFAULT NULL COMMENT '集群用户类型1:管理员2:普通用户',
- `user_id` int(11) DEFAULT NULL COMMENT '用户id',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '集群角色用户中间表' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_command
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command`;
-CREATE TABLE `t_ddh_cluster_service_command` (
- `command_id` varchar(128) NOT NULL COMMENT '主键',
- `create_by` varchar(32) DEFAULT NULL COMMENT '创建人',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- `command_name` varchar(256) DEFAULT NULL COMMENT '命令名称',
- `command_state` int(11) DEFAULT NULL COMMENT '命令状态 0:待运行 1:正在运行2:成功3:失败4、取消',
- `command_progress` int(11) DEFAULT NULL COMMENT '命令进度',
- `cluster_id` int(10) DEFAULT NULL,
- `service_name` varchar(128) DEFAULT NULL,
- `command_type` int(2) DEFAULT NULL COMMENT '命令类型1:安装服务 2:启动服务 3:停止服务 4:重启服务 5:更新配置后启动 6:更新配置后重启',
- `end_time` datetime DEFAULT NULL COMMENT '结束时间',
- `service_instance_id` int(10) DEFAULT NULL COMMENT '服务实例id',
- UNIQUE INDEX `command_id`(`command_id`)
-) DEFAULT CHARSET=utf8mb4 COMMENT = '集群服务操作指令表' ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_command_host
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command_host`;
-CREATE TABLE `t_ddh_cluster_service_command_host` (
- `command_host_id` varchar(128) NOT NULL DEFAULT '1' COMMENT '主键',
- `hostname` varchar(32) DEFAULT NULL COMMENT '主机',
- `command_state` int(11) DEFAULT NULL COMMENT '命令状态 1:正在运行2:成功3:失败4、取消',
- `command_progress` int(11) DEFAULT NULL COMMENT '命令进度',
- `command_id` varchar(128) DEFAULT NULL COMMENT '操作指令id',
- `create_time` datetime DEFAULT NULL,
- UNIQUE INDEX `command_host_id`(`command_host_id`) ,
- UNIQUE INDEX `command_host_id_2`(`command_host_id`)
-) DEFAULT CHARSET=utf8mb4 COMMENT = '集群服务操作指令主机表' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_command_host_command
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_command_host_command`;
-CREATE TABLE `t_ddh_cluster_service_command_host_command` (
- `host_command_id` varchar(128) NOT NULL DEFAULT '1' COMMENT '主键',
- `command_name` varchar(256) DEFAULT NULL COMMENT '指令名称',
- `command_state` int(11) DEFAULT NULL COMMENT '指令状态',
- `command_progress` int(11) DEFAULT NULL COMMENT '指令进度',
- `command_host_id` varchar(128) DEFAULT NULL COMMENT '主机id',
- `hostname` varchar(128) DEFAULT NULL COMMENT '主机',
- `service_role_name` varchar(128) DEFAULT NULL COMMENT '服务角色名称',
- `service_role_type` int(2) DEFAULT NULL COMMENT '服务角色类型',
- `command_id` varchar(128) DEFAULT NULL COMMENT '指令id',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- `command_type` int(2) DEFAULT NULL COMMENT '1:安装服务 2:启动服务 3:停止服务 4:重启服务 5:更新配置后启动 6:更新配置后重启',
- `result_msg` text NULL,
- UNIQUE INDEX `host_command_id`(`host_command_id`)
-) DEFAULT CHARSET=utf8mb4 COMMENT = '集群服务操作指令主机指令表' ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_dashboard
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_dashboard`;
-CREATE TABLE `t_ddh_cluster_service_dashboard` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主机',
- `service_name` varchar(128) DEFAULT NULL COMMENT '服务名称',
- `dashboard_url` varchar(256) DEFAULT NULL COMMENT '总览页面地址',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 22 DEFAULT CHARSET=utf8mb4 COMMENT = '集群服务总览仪表盘' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_instance
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_instance`;
-CREATE TABLE `t_ddh_cluster_service_instance` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `cluster_id` int(11) DEFAULT NULL COMMENT '集群id',
- `service_name` varchar(32) DEFAULT NULL COMMENT '服务名称',
- `service_state` int(11) DEFAULT NULL COMMENT '服务状态 1、待安装 2:正在运行 3:存在告警 4:存在异常',
- `update_time` datetime DEFAULT NULL COMMENT '更新时间',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- `need_restart` int(2) DEFAULT NULL COMMENT '是否需要重启 1:正常 2:需要重启',
- `frame_service_id` int(10) DEFAULT NULL COMMENT '框架服务id',
- `sort_num` int(2) DEFAULT NULL COMMENT '排序字段',
- `label` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '集群服务表' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_instance_role_group
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_instance_role_group`;
-CREATE TABLE `t_ddh_cluster_service_instance_role_group` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `role_group_name` varchar(255) DEFAULT NULL,
- `service_instance_id` int(11) DEFAULT NULL,
- `service_name` varchar(255) DEFAULT NULL,
- `cluster_id` int(11) DEFAULT NULL,
- `role_group_type` varchar(255) DEFAULT NULL,
- `create_time` datetime DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_role_group_config
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_group_config`;
-CREATE TABLE `t_ddh_cluster_service_role_group_config` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `role_group_id` int(10) DEFAULT NULL,
- `config_json` text NULL,
- `config_json_md5` varchar(255) DEFAULT NULL,
- `config_version` int(2) DEFAULT NULL,
- `config_file_json` text NULL,
- `config_file_json_md5` varchar(255) DEFAULT NULL,
- `cluster_id` int(10) DEFAULT NULL,
- `create_time` datetime DEFAULT NULL,
- `update_time` datetime DEFAULT NULL,
- `service_name` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_role_instance
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_instance`;
-CREATE TABLE `t_ddh_cluster_service_role_instance` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `service_role_name` varchar(32) DEFAULT NULL COMMENT '服务角色名称',
- `hostname` varchar(32) DEFAULT NULL COMMENT '主机',
- `service_role_state` int(2) DEFAULT NULL COMMENT '服务角色状态 1:正在运行2:停止',
- `update_time` datetime DEFAULT NULL COMMENT '更新时间',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- `service_id` int(11) DEFAULT NULL COMMENT '服务id',
- `role_type` int(11) DEFAULT NULL COMMENT '角色类型 1:master2:worker3:client',
- `cluster_id` int(10) DEFAULT NULL COMMENT '集群id',
- `service_name` varchar(255) DEFAULT NULL COMMENT '服务名称',
- `role_group_id` int(10) DEFAULT NULL COMMENT '角色组id',
- `need_restart` int(10) DEFAULT NULL COMMENT '是否需要重启 1:正常 2:需要重启',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '集群服务角色实例表' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_service_role_instance_webuis
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_service_role_instance_webuis`;
-CREATE TABLE `t_ddh_cluster_service_role_instance_webuis` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `service_role_instance_id` int(10) DEFAULT NULL COMMENT '服务角色id',
- `web_url` varchar(256) DEFAULT NULL COMMENT 'URL地址',
- `service_instance_id` int(10) DEFAULT NULL,
- `name` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '集群服务角色对应web ui表 ' ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_cluster_user
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_user`;
-CREATE TABLE `t_ddh_cluster_user` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `username` varchar(255) DEFAULT NULL,
- `cluster_id` int(10) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_user_group
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_user_group`;
-CREATE TABLE `t_ddh_cluster_user_group` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `user_id` int(10) DEFAULT NULL,
- `group_id` int(10) DEFAULT NULL,
- `cluster_id` int(10) DEFAULT NULL,
- `user_group_type` int(2) DEFAULT NULL COMMENT '1:主用户组 2:附加组',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_variable
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_variable`;
-CREATE TABLE `t_ddh_cluster_variable` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `cluster_id` int(10) DEFAULT NULL,
- `variable_name` varchar(255) DEFAULT NULL,
- `variable_value` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_cluster_yarn_queue
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_yarn_queue`;
-CREATE TABLE `t_ddh_cluster_yarn_queue` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `queue_name` varchar(255) DEFAULT NULL,
- `min_core` int(10) DEFAULT NULL,
- `min_mem` int(10) DEFAULT NULL,
- `max_core` int(10) DEFAULT NULL,
- `max_mem` int(10) DEFAULT NULL,
- `app_num` int(10) DEFAULT NULL,
- `weight` int(2) DEFAULT NULL,
- `schedule_policy` varchar(255) DEFAULT NULL COMMENT 'fifo ,fair ,drf',
- `allow_preemption` int(2) DEFAULT NULL COMMENT '1: true 2:false',
- `cluster_id` int(10) DEFAULT NULL,
- `am_share` varchar(255) DEFAULT NULL,
- `create_time` datetime DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_yarn_scheduler
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_yarn_scheduler`;
-CREATE TABLE `t_ddh_cluster_yarn_scheduler` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `cluster_id` int(11) DEFAULT NULL,
- `scheduler` varchar(255) DEFAULT NULL,
- `in_use` int(2) DEFAULT NULL COMMENT '1: 是 2:否',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_cluster_zk
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_cluster_zk`;
-CREATE TABLE `t_ddh_cluster_zk` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `zk_server` varchar(255) DEFAULT NULL,
- `myid` int(10) DEFAULT NULL,
- `cluster_id` int(10) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_command
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_command`;
-CREATE TABLE `t_ddh_command` (
- `id` int(10) NOT NULL,
- `command_type` int(2) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_frame_info
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_frame_info`;
-CREATE TABLE `t_ddh_frame_info` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `frame_name` varchar(128) DEFAULT NULL COMMENT '框架名称',
- `frame_code` varchar(128) DEFAULT NULL COMMENT '框架编码',
- `frame_version` varchar(128) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '集群框架表' ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_frame_service
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_frame_service`;
-CREATE TABLE `t_ddh_frame_service` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `frame_id` int(11) DEFAULT NULL COMMENT '版本id',
- `service_name` varchar(32) DEFAULT NULL COMMENT '服务名称',
- `label` varchar(255) DEFAULT NULL,
- `service_version` varchar(32) DEFAULT NULL COMMENT '服务版本',
- `service_desc` varchar(1024) DEFAULT NULL COMMENT '服务描述',
- `dependencies` varchar(255) DEFAULT NULL COMMENT '服务依赖',
- `package_name` varchar(255) DEFAULT NULL COMMENT '安装包名称',
- `service_config` text NULL,
- `service_json` text NULL,
- `service_json_md5` varchar(255) DEFAULT NULL,
- `frame_code` varchar(255) DEFAULT NULL,
- `config_file_json` text NULL,
- `config_file_json_md5` varchar(255) DEFAULT NULL,
- `decompress_package_name` varchar(255) DEFAULT NULL,
- `sort_num` int(2) DEFAULT NULL COMMENT '排序字段',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '集群框架版本服务表' ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_frame_service_role
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_frame_service_role`;
-CREATE TABLE `t_ddh_frame_service_role` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `service_id` int(11) DEFAULT NULL COMMENT '服务id',
- `service_role_name` varchar(32) DEFAULT NULL COMMENT '角色名称',
- `service_role_type` int(11) DEFAULT NULL COMMENT '角色类型 1:master2:worker3:client',
- `cardinality` varchar(32) DEFAULT NULL,
- `service_role_json` text NULL,
- `service_role_json_md5` varchar(255) DEFAULT NULL,
- `frame_code` varchar(255) DEFAULT NULL,
- `jmx_port` varchar(255) DEFAULT NULL,
- `log_file` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '框架服务角色表' ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_install_step
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_install_step`;
-CREATE TABLE `t_ddh_install_step` (
- `id` int(10) NOT NULL AUTO_INCREMENT,
- `step_name` varchar(128) DEFAULT NULL,
- `step_desc` varchar(256) DEFAULT NULL,
- `install_type` int(1) DEFAULT NULL COMMENT '1:集群配置2:添加服务3:添加主机',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 10 DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_notice_group
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_notice_group`;
-CREATE TABLE `t_ddh_notice_group` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `notice_group_name` varchar(32) DEFAULT NULL COMMENT '通知组名称',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '通知组表' ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_notice_group_user
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_notice_group_user`;
-CREATE TABLE `t_ddh_notice_group_user` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `notice_group_id` int(11) DEFAULT NULL COMMENT '通知组id',
- `user_id` int(11) DEFAULT NULL COMMENT '用户id',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '通知组-用户中间表' ROW_FORMAT = DYNAMIC;
-
-
--- ----------------------------
--- Table structure for t_ddh_role_info
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_role_info`;
-CREATE TABLE `t_ddh_role_info` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `role_name` varchar(128) DEFAULT NULL COMMENT '角色名称',
- `role_code` varchar(128) DEFAULT NULL COMMENT '角色编码',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 1 DEFAULT CHARSET=utf8mb4 COMMENT = '角色信息表' ROW_FORMAT = DYNAMIC;
-
--- ----------------------------
--- Table structure for t_ddh_session
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_session`;
-CREATE TABLE `t_ddh_session` (
- `id` varchar(128) NOT NULL,
- `user_id` int(10) DEFAULT NULL,
- `ip` varchar(128) DEFAULT NULL,
- `last_login_time` datetime DEFAULT NULL,
- PRIMARY KEY (`id`)
-) DEFAULT CHARSET=utf8mb4 ROW_FORMAT = DYNAMIC;
-
-
-
--- ----------------------------
--- Table structure for t_ddh_user_info
--- ----------------------------
-DROP TABLE IF EXISTS `t_ddh_user_info`;
-CREATE TABLE `t_ddh_user_info` (
- `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
- `username` varchar(128) DEFAULT NULL COMMENT '用户名',
- `password` varchar(128) DEFAULT NULL COMMENT '密码',
- `email` varchar(128) DEFAULT NULL COMMENT '邮箱',
- `phone` varchar(128) DEFAULT NULL COMMENT '手机号',
- `create_time` datetime DEFAULT NULL COMMENT '创建时间',
- `user_type` int(2) DEFAULT NULL COMMENT '1:超级管理员 2:普通用户',
- PRIMARY KEY (`id`)
-) AUTO_INCREMENT = 2 DEFAULT CHARSET=utf8mb4 COMMENT = '用户信息表' ROW_FORMAT = DYNAMIC;
-
-
-SET FOREIGN_KEY_CHECKS = 1;
diff --git a/datasophon-init/sql/V1.1.0__DML.sql b/datasophon-init/sql/V1.1.0__DML.sql
index 3d6be3e0..e69de29b 100644
--- a/datasophon-init/sql/V1.1.0__DML.sql
+++ b/datasophon-init/sql/V1.1.0__DML.sql
@@ -1,418 +0,0 @@
--- ----------------------------
--- Records of t_ddh_access_token
--- ----------------------------
-INSERT INTO `t_ddh_access_token` VALUES (0, 1, 'test', '2022-06-15 09:51:54', '2022-06-15 09:51:57', '2023-01-01 09:51:59');
-
--- ----------------------------
--- Records of t_ddh_alert_group
--- ----------------------------
-INSERT INTO `t_ddh_alert_group` VALUES (1, 'HIVE告警组', 'HIVE', '2022-07-14 15:52:45');
-INSERT INTO `t_ddh_alert_group` VALUES (2, 'HDFS告警组', 'HDFS', '2022-07-14 15:52:47');
-INSERT INTO `t_ddh_alert_group` VALUES (3, 'YARN告警组', 'YARN', '2022-07-14 15:52:50');
-INSERT INTO `t_ddh_alert_group` VALUES (8, 'HBASE告警组', 'HBASE', '2022-07-14 15:52:52');
-INSERT INTO `t_ddh_alert_group` VALUES (10, 'KAFKA告警组', 'KAFKA', '2022-07-14 15:52:57');
-INSERT INTO `t_ddh_alert_group` VALUES (11, '主机告警组', 'NODE', '2022-07-14 15:52:59');
-INSERT INTO `t_ddh_alert_group` VALUES (12, 'ZOOKEEPER告警组', 'ZOOKEEPER', '2022-07-14 15:53:02');
-INSERT INTO `t_ddh_alert_group` VALUES (13, 'ALERTMANAGER告警组', 'ALERTMANAGER', '2022-07-14 15:53:05');
-INSERT INTO `t_ddh_alert_group` VALUES (14, 'GRAFANA告警组', 'GRAFANA', '2022-07-14 15:53:07');
-INSERT INTO `t_ddh_alert_group` VALUES (15, 'PROMETHEUS告警组', 'PROMETHEUS', '2022-07-14 15:53:09');
-INSERT INTO `t_ddh_alert_group` VALUES (16, 'SPARK告警组', 'SPARK3', '2022-07-15 14:12:38');
-INSERT INTO `t_ddh_alert_group` VALUES (17, 'TRINO告警组', 'TRINO', '2022-07-24 23:23:01');
-INSERT INTO `t_ddh_alert_group` VALUES (18, 'RANGER告警组', 'RANGER', '2022-09-09 11:29:14');
-INSERT INTO `t_ddh_alert_group` VALUES (19, 'STARROCKS告警组', 'STARROCKS', '2022-09-13 14:53:57');
-INSERT INTO `t_ddh_alert_group` VALUES (20, 'ELASTICSEARCH告警组', 'ELASTICSEARCH', '2022-10-08 16:15:55');
-INSERT INTO `t_ddh_alert_group` VALUES (21, 'DS告警组', 'DS', '2022-11-20 21:00:00');
-INSERT INTO `t_ddh_alert_group` VALUES (22, 'SP告警组', 'STREAMPARK', '2022-11-21 18:20:10');
-INSERT INTO `t_ddh_alert_group` VALUES (23, 'Doris告警组', 'DORIS', '2023-01-07 22:12:36');
-
-
-
--- ----------------------------
--- Records of t_ddh_cluster_alert_expression
--- ----------------------------
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101001, '主机内存使用率(%)', '(1-(node_memory_MemAvailable_bytes/(node_memory_MemTotal_bytes)))*100', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101002, '主机CPU使用率(%)', '(1-avg(irate(node_cpu_seconds_total{mode=\"idle\"}[5m]))by(instance))*100', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101003, '主机CPU系统使用率(%)', 'avg(irate(node_cpu_seconds_total{mode=\"system\"}[5m]))by(instance)*100', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101004, '主机CPU用户使用率(%)', 'avg(irate(node_cpu_seconds_total{mode=\"user\"}[5m]))by(instance)*100', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101005, '主机磁盘IO使用率(%)', 'avg(irate(node_cpu_seconds_total{mode=\"iowait\"}[5m]))by(instance)*100', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101006, '主机交换分区使用率(%)', '(1-((node_memory_SwapFree_bytes+1)/(node_memory_SwapTotal_bytes+1)))*100', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101007, '主机磁盘使用率(%)', '(node_filesystem_size_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"}-node_filesystem_free_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"})*100/(node_filesystem_avail_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"}+(node_filesystem_size_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"}-node_filesystem_free_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"}))', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101008, '主机入网带宽', 'irate(node_network_receive_bytes_total[5m])*8', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101009, '主机出网带宽', 'irate(node_network_transmit_bytes_total[5m])*8', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101010, '系统平均负载[1m]', 'node_load1', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101011, '系统平均负载[5m]', 'node_load5', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101012, '系统平均负载[15m]', 'node_load15', 'NODE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101013, 'Ntp服务存活', 'cluster_basic_isNtpServiceAlive', 'NODE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (101014, 'Ntp时间同步', 'cluster_basic_isNtpClockSyncNormal', 'NODE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (102001, 'AlertManager进程存活', 'alertmanager_isAlertmanagerProcessAlive', 'ALERTMANAGER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (103001, 'Elasticsearch进程存活', 'Elastic_isEsProcessAlive', 'ELASTICSEARCH', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (103002, 'ElasticsearchCPU使用率(%)', 'es_os_cpu_percent{job=\"ELASTICSEARCH-ElasticSearch\"}', 'ELASTICSEARCH', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (103003, 'Elasticsearch内存使用率(%)', 'es_os_mem_used_percent', 'ELASTICSEARCH', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (103004, 'Elasticsearch磁盘使用率(%)', '100-es_fs_path_available_bytes*100/es_fs_path_total_bytes', 'ELASTICSEARCH', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (104001, 'FlinkHistoryServer进程存活', 'up{job=\"FLINK-FlinkHistoryServer\"}', 'FLINK', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (105001, 'Grafana进程存活', 'grafana_isGrafanaProcessAlive', 'GRAFANA', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (106001, 'HBaseMaster进程存活', 'hbase_isHMasterProcessAlive', 'HBASE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (106002, 'HRegionServer进程存活', 'hbase_isHRegionServerProcessAlive', 'HBASE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (106003, 'HThriftServer进程存活', 'hbase_isHThriftServerProcessAlive', 'HBASE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107001, 'NameNode进程存活', 'hdfs_isNameNodeProcessAlive', 'HDFS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107002, 'NameNodeRPC延迟[5m]', 'avg_over_time(Hadoop_NameNode_RpcProcessingTimeAvgTime{job=\"HDFS-NameNode\"}[5m])', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107003, 'NameNodeRPC延迟[15m]', 'avg_over_time(Hadoop_NameNode_RpcProcessingTimeAvgTime{job=\"HDFS-NameNode\"}[15m])', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107004, 'NameNode堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"HDFS-NameNode\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"HDFS-NameNode\"}', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107005, 'NameNode老年代GC持续时间[5m]', 'avg_over_time(Hadoop_NameNode_GcTimeMillisConcurrentMarkSweep{job=\"HDFS-NameNode\"}[5m])/(5*60*1000)', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107006, 'NameNode新生代GC持续时间[5m]', 'avg_over_time(Hadoop_NameNode_GcTimeMillisParNew{job=\"HDFS-NameNode\"}[5m])/(5*60*1000)', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107007, 'NameNodeGC持续时间[5m]', 'avg_over_time(Hadoop_NameNode_GcTimeMillis{job=\"HDFS-NameNode\"}[5m])/(5*60*1000)', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107008, 'DataNode进程存活', 'hdfs_isDataNodeProcessAlive', 'HDFS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107009, 'DataNodeRPC[5m]', 'avg_over_time(Hadoop_DataNode_RpcProcessingTimeAvgTime{job=\"HDFS-DataNode\"}[5m])', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107010, 'DataNodeRPC[15m]', 'avg_over_time(Hadoop_DataNode_RpcProcessingTimeAvgTime{job=\"HDFS-DataNode\"}[15m])', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107011, 'DataNode堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"HDFS-DataNode\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"HDFS-DataNode\"}', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107012, 'DataNode老年代GC持续时间[5m]', 'avg_over_time(Hadoop_DataNode_GcTimeMillisConcurrentMarkSweep{job=\"HDFS-DataNode\"}[5m])/(5*60*1000)', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107013, 'DataNode新生代GC持续时间[5m]', 'avg_over_time(Hadoop_DataNode_GcTimeMillisParNew{job=\"HDFS-DataNode\"}[5m])/(5*60*1000)', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107014, 'DataNodeGC持续时间[5m]', 'avg_over_time(Hadoop_DataNode_GcTimeMillis{job=\"HDFS-DataNode\"}[5m])/(5*60*1000)', 'HDFS', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107015, 'JournalNode进程存活', 'hdfs_isJournalNodeProcessAlive', 'HDFS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107016, 'ZKFailoverController进程存活', 'hdfs_isDFSZKFCProcessAlive', 'HDFS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107017, 'HttpFs进程存活', 'supplement_isHttpFsServerProcessNormal', 'HDFS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107018, 'HDFS坏盘', 'Hadoop_NameNode_VolumeFailuresTotal{name=\"FSNamesystem\"}', 'HDFS', 'INT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (107019, 'HDFS块丢失', 'Hadoop_NameNode_MissingBlocks{name=\"FSNamesystem\"}', 'HDFS', 'INT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108001, 'HiveServer2进程存活', 'hive_isHiveServer2ProcessAlive', 'HIVE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108002, 'HiveServer2堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"HIVE-HiveServer2\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"HIVE-HiveServer2\"}', 'HIVE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108003, 'HiveServer2老年代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"HIVE-HiveServer2\",name=\"PS MarkSweep\"}[5m])/(5*60*1000)', 'HIVE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108004, 'HiveServer2新生代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"HIVE-HiveServer2\",name=\"PS Scavenge\"}[5m])/(5*60*1000)', 'HIVE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108005, 'HiveMetastore进程存活', 'hive_isHiveMetaStoreProcessAlive', 'HIVE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108006, 'HiveMetastore堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"HIVE-MetaStore\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"HIVE-MetaStore\"}', 'HIVE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108007, 'HiveMetastore老年代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"HIVE-MetaStore\",name=\"PS MarkSweep\"}[5m])/(5*60*1000)', 'HIVE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108008, 'HiveMetastore新生代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"HIVE-MetaStore\",name=\"PS Scavenge\"}[5m])/(5*60*1000)', 'HIVE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (108009, 'MySQL进程存活', 'hive_isMysqlProcessAlive', 'HIVE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (109001, 'HueServer进程存活', 'hue_isHueProcessAlive', 'HUE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (110001, 'InfluxDB进程存活', 'supplement_isInfluxDBProcessAlive', 'INFLUXDB', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (111001, 'KafkaEagle进程存活', 'KafkaEagle_isKafkaEagleProcessAlive', 'KAFKAEAGLE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (112001, 'Kibana进程存活', 'kibana_isKibanaProcessAlive', 'KIBANA', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (113001, 'KylinServer进程存活', 'up{job=\"KYLIN-KylinServer\"}', 'KYLIN', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (113002, 'KylinServer堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"KYLIN-KylinServer\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"KYLIN-KylinServer\"}', 'KYLIN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (113003, 'KylinServer老年代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"KYLIN-KylinServer\",name=\"ConcurrentMarkSweep\"}[5m])/(5*60*1000)', 'KYLIN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (113004, 'KylinServer新生代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"KYLIN-KylinServer\",name=\"ParNew\"}[5m])/(5*60*1000)', 'KYLIN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (114001, 'LivyServer进程存活', 'up{job=\"LIVY-LivyServer\"}', 'LIVY', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (114002, 'LivyServer堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"LIVY-LivyServer\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"LIVY-LivyServer\"}', 'LIVY', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (114003, 'LivyServer老年代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"LIVY-LivyServer\",name=\"PS MarkSweep\"}[5m])/(5*60*1000)', 'LIVY', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (114004, 'LivyServer新生代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"LIVY-LivyServer\",name=\"PS Scavenge\"}[5m])/(5*60*1000)', 'LIVY', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (115001, 'NodeExporter进程存活', 'up{job=\"NODEEXPORTER-NodeExporter\"}', 'NODEEXPORTER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (116001, 'OozieServer进程存活', 'up{job=\"OOZIE-OozieServer\"}', 'OOZIE', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (116002, 'OozieServer堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"OOZIE-OozieServer\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"OOZIE-OozieServer\"}', 'OOZIE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (116003, 'OozieServer老年代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"OOZIE-OozieServer\",name=\"PS MarkSweep\"}[5m])/(5*60*1000)', 'OOZIE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (116004, 'OozieServer新生代GC持续时间[5m]', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"OOZIE-OozieServer\",name=\"PS Scavenge\"}[5m])/(5*60*1000)', 'OOZIE', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (117001, 'Prometheus进程存活', 'up{job=\"prometheus\"}', 'PROMETHEUS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (118001, 'RangerServer进程存活', 'up{job=\"RANGER-RangerAdmin\"}', 'RANGER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (119001, 'SparkHistoryServer进程存活', 'spark_isHistoryServerProcessAlive', 'SPARK', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (120001, 'TezUI进程存活', 'hive_isTezUIProcessAlive', 'TEZ', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (121001, 'MonitorAgent进程存活', 'up{job=\"USDPMONITOR-MonitorAgent\"}', 'USDPMONITOR', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (122001, 'ZkUI进程存活', 'zk_isZKUIProcessAlive', 'ZKUI', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (123001, 'QuarumPeermain进程存活', 'zk_isZKProcessAlive', 'ZOOKEEPER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124001, 'ResourceManager进程存活', 'yarn_isResourceManagerProcessAlive', 'YARN', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124002, 'ResourceManager堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"YARN-ResourceManager\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"YARN-ResourceManager\"}', 'YARN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124003, 'ResourceManager老年代GC持续时间[5m]', 'avg_over_time(Hadoop_ResourceManager_GcTimeMillisPS_MarkSweep{job=\"YARN-ResourceManager\"}[5m])/(5*60*1000)', 'YARN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124004, 'ResourceManager新生代GC持续时间[5m]', 'avg_over_time(Hadoop_ResourceManager_GcTimeMillisPS_Scavenge{job=\"YARN-ResourceManager\"}[5m])/(5*60*1000)', 'YARN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124005, 'ResourceManagerGC持续时间[5m]', 'avg_over_time(Hadoop_ResourceManager_GcTimeMillis{job=\"YARN-ResourceManager\"}[5m])/(5*60*1000)', 'YARN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124006, 'NodeManager进程存活', 'yarn_isNodeManagerProcessAlive', 'YARN', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124007, 'NodeManager堆内存使用率(%)', 'java_lang_Memory_HeapMemoryUsage_used{job=\"YARN-NodeManager\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"YARN-NodeManager\"}', 'YARN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124008, 'NodeManager老年代GC持续时间[5m]', 'avg_over_time(Hadoop_NodeManager_GcTimeMillisPS_MarkSweep{job=\"YARN-NodeManager\"}[5m])/(5*60*1000)', 'YARN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124009, 'NodeManager新生代GC持续时间[5m]', 'avg_over_time(Hadoop_NodeManager_GcTimeMillisPS_Scavenge{job=\"YARN-NodeManager\"}[5m])/(5*60*1000)', 'YARN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (124010, 'NodeManagerGC持续时间[5m]', 'avg_over_time(Hadoop_NodeManager_GcTimeMillis{job=\"YARN-NodeManager\"}[5m])/(5*60*1000)', 'YARN', 'FLOAT', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (125001, 'PrestoCoordinator进程存活', 'presto_isCoordinatorProcessAlive', 'PRESTO', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (125002, 'PrestoWorker进程存活', 'presto_isWorkerProcessAlive', 'PRESTO', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (126001, 'UdsMaster进程存活', 'uds_isMasterProcessAlive', 'UDS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (126002, 'UdsWorker进程存活', 'uds_isWorkerProcessAlive', 'UDS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (126003, 'UdsWeb进程存活', 'uds_isWebProcessAlive', 'UDS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (127001, 'KuduMaster进程存活', 'kudu_isMasterProcessAlive', 'KUDU', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (127002, 'KuduTserver进程存活', 'kudu_isTServerProcessAlive', 'KUDU', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (128001, 'ImpalaImpalad进程存活', 'impala_isImpaladProcessAlive', 'IMPALA', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (128002, 'ImpalaCatalog进程存活', 'impala_isCatalogdProcessAlive', 'IMPALA', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (128003, 'ImpalaStatestored进程存活', 'impala_isStatestoredProcessAlive', 'IMPALA', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (129001, 'ZeppelinServer进程存活', 'supplement_isZeppelinServerProcessNormal', 'ZEPPELIN', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (130001, 'AirflowWebserver进程存活', 'supplement_isAirflowWebserverProcessAlive', 'AIRFLOW', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (130002, 'AirflowScheduler进程存活', 'supplement_isAirflowSchedulerProcessAlive', 'AIRFLOW', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (131001, 'AtlasIndexServer进程存活', 'supplement_isAtlasIndexServerProcessAlive', 'ATLAS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (131002, 'AtlasServer进程存活', 'supplement_isAtlasServerProcessAlive', 'ATLAS', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (132001, 'AlertServer进程存活', 'DolphinScheduler_isAlertProcessAlive', 'DOLPHINSCHEDULER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (132002, 'ApiServer进程存活', 'DolphinScheduler_isAPIProcessAlive', 'DOLPHINSCHEDULER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (132003, 'LoggerServer进程存活', 'DolphinScheduler_isLoggerProcessAlive', 'DOLPHINSCHEDULER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (132004, 'MasterServer进程存活', 'DolphinScheduler_isMasterProcessAlive', 'DOLPHINSCHEDULER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (132005, 'WorkerServer进程存活', 'DolphinScheduler_isWorkerProcessAlive', 'DOLPHINSCHEDULER', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (133001, 'TrinoCoordinator进程存活', 'trino_isCoordinatorProcessAlive', 'TRINO', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (133002, 'TrinoWorker进程存活', 'trino_isWorkerProcessAlive', 'TRINO', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_expression` VALUES (134001, 'Neo4j进程存活', 'supplement_isNeo4jServerProcessAlive', 'NEO4J', 'BOOL', 'TRUE', 'VALID', 'FALSE', NULL, NULL);
-
-
--- ----------------------------
--- Records of t_ddh_cluster_alert_quota
--- ----------------------------
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (464, '主机内存使用率', 'NODE', '(1-(node_memory_MemAvailable_bytes/(node_memory_MemTotal_bytes)))*100', 1, 11, 1, '内存使用率过高,可迁出部分进程', '>', 95, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (465, '主机CPU使用率', 'NODE', '(1-avg(irate(node_cpu_seconds_total{mode=\"idle\"}[5m]))by(instance))*100', 1, 11, 1, 'cpu使用率过高,注意观察cpu密集型进程,必要时需停止或迁移该进程', '>', 90, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (466, '主机CPU系统使用率', 'NODE', 'avg(irate(node_cpu_seconds_total{mode=\"system\"}[5m]))by(instance)*100', 1, 11, 1, 'cpu使用过高,评估是否有任务倾斜', '>', 95, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (467, '主机CPU用户使用率', 'NODE', 'avg(irate(node_cpu_seconds_total{mode=\"user\"}[5m]))by(instance)*100', 2, 11, 1, 'cpu使用过高,评估是否有任务倾斜', '>', 95, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (468, '主机磁盘IO使用率', 'NODE', 'avg(irate(node_cpu_seconds_total{mode=\"iowait\"}[5m]))by(instance)*100', 1, 11, 1, '磁盘IO过高,评估任务执行是否过于密集', '>', 95, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (469, '主机交换分区使用率', 'NODE', '(1-((node_memory_SwapFree_bytes+1)/(node_memory_SwapTotal_bytes+1)))*100', 1, 11, 1, '主机交换分区使用率过高,评估是否存在任务密集执行', '>', 95, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (470, '主机磁盘使用率', 'NODE', '(node_filesystem_size_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"}-node_filesystem_free_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"})*100/(node_filesystem_avail_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"}+(node_filesystem_size_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"}-node_filesystem_free_bytes{fstype=~\"ext.*|xfs\",mountpoint!~\".*pod.*\"}))', 1, 11, 1, '请清理磁盘', '>', 85, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (471, '主机入网带宽', 'NODE', 'irate(node_network_receive_bytes_total[5m])*8', 1, 11, 1, '网络流量过高', '>', 8589934592, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (472, '主机出网带宽', 'NODE', 'irate(node_network_transmit_bytes_total[5m])*8', 1, 11, 1, '网络流量过高', '>', 8589934592, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (473, '系统平均负载[1m]', 'NODE', 'node_load1', 1, 11, 1, '系统负载过高', '>', 100, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (474, '系统平均负载[5m]', 'NODE', 'node_load5', 1, 11, 1, '系统负载过高', '>', 100, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (475, '系统平均负载[15m]', 'NODE', 'node_load15', 1, 11, 1, '系统负载过高', '>', 100, 1, 1, 60, 'node', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (479, 'NameNode进程存活', 'HDFS', 'up{job=\"namenode\"}', 2, 2, 2, '查看日志,分析宕机原因,解决问题后重新启动', '!=', 1, 1, 1, 15, 'NameNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (482, 'NameNode堆内存使用率', 'HDFS', '(Hadoop_NameNode_MemHeapUsedM/Hadoop_NameNode_MemHeapMaxM )*100', 1, 2, 2, 'NameNode堆内存不足,增大NameNode堆内存', '>', 95, 1, 1, 60, 'NameNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (483, 'NameNode老年代GC持续时间[5m]', 'HDFS', 'avg_over_time(Hadoop_NameNode_GcTimeMillisPS_MarkSweep{job=\"namenode\"}[5m])/1000', 1, 2, 2, '老年代GC时间过长,可考虑加大堆内存', '>', 60, 1, 1, 60, 'NameNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (484, 'NameNode新生代GC持续时间[5m]', 'HDFS', 'avg_over_time(Hadoop_NameNode_GcTimeMillisPS_Scavenge{job=\"namenode\"}[5m])/1000', 1, 2, 2, '新生代GC时间过长,可考虑加大堆内存', '>', 60, 1, 1, 60, 'NameNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (485, 'NameNodeGC持续时间[5m]', 'HDFS', 'avg_over_time(Hadoop_NameNode_GcTimeMillis{job=\"namenode\"}[5m])/1000', 1, 2, 2, 'GC时间过长,可考虑加大堆内存', '>', 60, 1, 1, 60, 'NameNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (486, 'DataNode进程存活', 'HDFS', 'up{job=\"datanode\"}', 2, 2, 2, '查看日志,分析宕机原因,解决问题后重新启动', '!=', 1, 1, 1, 15, 'DataNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (487, 'DataNodeRPC[5m]', 'HDFS', 'avg_over_time(Hadoop_DataNode_RpcProcessingTimeAvgTime{job=\"datanode\"}[5m])', 1, 2, 2, '请检查网络流量使用情况', '>', 5, 1, 1, 60, 'DataNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (488, 'DataNodeRPC[15m]', 'HDFS', 'avg_over_time(Hadoop_DataNode_RpcProcessingTimeAvgTime{job=\"datanode\"}[15m])', 1, 2, 2, '请检查网络流量使用情况', '>', 5, 1, 1, 60, 'DataNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (489, 'DataNode堆内存使用率', 'HDFS', '(Hadoop_DataNode_MemHeapUsedM/Hadoop_DataNode_MemHeapMaxM )*100', 1, 2, 2, 'NameNode堆内存不足,增大NameNode堆内存', '>', 95, 1, 1, 60, 'DataNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (490, 'DataNode老年代GC持续时间[5m]', 'HDFS', 'avg_over_time(Hadoop_DataNode_GcTimeMillisPS_MarkSweep{job=\"datanode\"}[5m])/(1000)', 1, 2, 2, '老年代GC时间过长,可考虑加大堆内存', '>', 60, 1, 1, 60, 'DataNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (491, 'DataNode新生代GC持续时间[5m]', 'HDFS', 'avg_over_time(Hadoop_DataNode_GcTimeMillisPS_Scavenge{job=\"datanode\"}[5m])/(1000)', 1, 2, 2, '新生代GC时间过长,可考虑加大堆内存', '>', 60, 1, 1, 60, 'DataNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (492, 'DataNodeGC持续时间[5m]', 'HDFS', 'avg_over_time(Hadoop_DataNode_GcTimeMillis{job=\"datanode\"}[5m])/(1000)', 1, 2, 2, 'GC时间过长,可考虑加大堆内存', '>', 60, 1, 1, 60, 'DataNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (493, 'JournalNode进程存活', 'HDFS', 'up{job=\"journalnode\"}', 2, 2, 2, 'JournalNode宕机,请重新启动', '!=', 1, 1, 1, 15, 'JournalNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (494, 'ZKFailoverController进程存活', 'HDFS', 'up{job=\"zkfc\"}', 2, 2, 2, 'ZKFC宕机,请重新启动', '!=', 1, 1, 1, 15, 'ZKFC', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (496, 'HDFS坏盘', 'HDFS', 'Hadoop_NameNode_VolumeFailuresTotal{name=\"FSNamesystem\"}', 1, 2, 2, '存在坏盘', '>', 0, 1, 1, 60, 'NameNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (497, 'HDFS块丢失', 'HDFS', 'Hadoop_NameNode_MissingBlocks{name=\"FSNamesystem\"}', 1, 2, 2, '存在块丢失', '>', 0, 1, 1, 60, 'NameNode', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (518, 'Grafana进程存活', 'GRAFANA', 'up{job=\"grafana\"}', 2, 14, 1, 'Grafana宕机,请重新启动', '!=', 1, 1, 1, 15, 'Grafana', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (519, 'HBaseMaster进程存活', 'HBASE', 'up{job=\"hbasemaster\"}', 2, 8, 1, 'Hbase Master宕机,请重新启动', '!=', 1, 1, 1, 15, 'HbaseMaster', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (520, 'HRegionServer进程存活', 'HBASE', 'up{job=\"regionserver\"}', 2, 8, 1, 'RegionServer宕机,请重新启动', '!=', 1, 1, 1, 15, 'RegionServer', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (541, 'HiveServer2进程存活', 'HIVE', 'up{job=\"hiveserver2\"}', 2, 1, 1, 'HiveServer2宕机,请重新启动', '!=', 1, 1, 1, 15, 'HiveServer2', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (542, 'HiveServer2堆内存使用率', 'HIVE', 'java_lang_Memory_HeapMemoryUsage_used{job=\"hiveserver2\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"hiveserver2\"}', 1, 1, 1, 'HiveServer2堆内存不足,增大NameNode堆内存', '>', 95, 1, 1, 60, 'HiveServer2', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (543, 'HiveServer2老年代GC持续时间[5m]', 'HIVE', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"hiveserver2\",name=\"PS MarkSweep\"}[5m])/(1000)', 1, 1, 1, '请联系管理员', '>', 60, 1, 1, 60, 'HiveServer2', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (544, 'HiveServer2新生代GC持续时间[5m]', 'HIVE', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"hiveserver2\",name=\"PS Scavenge\"}[5m])/(1000)', 1, 1, 1, '请联系管理员', '>', 60, 1, 1, 60, 'HiveServer2', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (545, 'HiveMetastore进程存活', 'HIVE', 'up{job=\"hivemetastore\"}', 2, 1, 1, 'HiveMetastore宕机,请重新启动', '!=', 1, 1, 1, 15, 'HiveMetaStore', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (546, 'HiveMetastore堆内存使用率', 'HIVE', 'java_lang_Memory_HeapMemoryUsage_used{job=\"hivemetastore\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"hivemetastore\"}', 1, 1, 1, '请联系管理员', '>', 95, 1, 1, 60, 'HiveMetaStore', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (547, 'HiveMetastore老年代GC持续时间[5m]', 'HIVE', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"hivemetastore\",name=\"PS MarkSweep\"}[5m])/(1000)', 1, 1, 1, '请联系管理员', '>', 60, 1, 1, 60, 'HiveMetaStore', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (548, 'HiveMetastore新生代GC持续时间[5m]', 'HIVE', 'avg_over_time(java_lang_GarbageCollector_CollectionTime{job=\"hivemetastore\",name=\"PS Scavenge\"}[5m])/(1000)', 1, 1, 1, '请联系管理员', '>', 60, 1, 1, 60, 'HiveMetaStore', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (568, 'RangerServer进程存活', 'RANGER', 'up{job=\"rangeradmin\"}', 2, 18, 1, '请联系管理员', '!=', 1, 1, 1, 15, 'RangerAdmin', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (569, 'SparkHistoryServer进程存活', 'SPARK', 'up{job=\"sparkhistoryserver\"}', 2, 16, 1, '请联系管理员', '!=', 1, 1, 1, 15, 'SparkHistoryServer', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (573, 'ZK进程存活', 'ZOOKEEPER', 'up{job=\"zkserver\"}', 2, 12, 1, 'zk宕机,请重新启动', '!=', 1, 1, 1, 15, 'ZkServer', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (574, 'ResourceManager进程存活', 'YARN', 'up{job=\"resourcemanager\"}', 2, 3, 1, 'ResourceManager宕机,请重新启动', '!=', 1, 1, 1, 15, 'ResourceManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (575, 'ResourceManager堆内存使用率', 'YARN', 'java_lang_Memory_HeapMemoryUsage_used{job=\"resourcemanager\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"resourcemanager\"}', 1, 3, 1, '请联系管理员', '>', 95, 1, 1, 60, 'ResourceManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (576, 'ResourceManager老年代GC持续时间[5m]', 'YARN', 'avg_over_time(Hadoop_ResourceManager_GcTimeMillisPS_MarkSweep{job=\"resourcemanager\"}[5m])/(1000)', 1, 3, 1, '请联系管理员', '>', 60, 1, 1, 60, 'ResourceManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (577, 'ResourceManager新生代GC持续时间[5m]', 'YARN', 'avg_over_time(Hadoop_ResourceManager_GcTimeMillisPS_Scavenge{job=\"resourcemanager\"}[5m])/(1000)', 1, 3, 1, '请联系管理员', '>', 60, 1, 1, 60, 'ResourceManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (578, 'ResourceManagerGC持续时间[5m]', 'YARN', 'avg_over_time(Hadoop_ResourceManager_GcTimeMillis{job=\"resourcemanager\"}[5m])/(1000)', 1, 3, 1, '请联系管理员', '>', 60, 1, 1, 60, 'ResourceManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (579, 'NodeManager进程存活', 'YARN', 'up{job=\"nodemanager\"}', 2, 3, 1, 'NodeManager宕机,请重新启动', '!=', 1, 1, 1, 15, 'NodeManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (580, 'NodeManager堆内存使用率', 'YARN', 'java_lang_Memory_HeapMemoryUsage_used{job=\"nodemanager\"}*100/java_lang_Memory_HeapMemoryUsage_max{job=\"nodemanager\"}', 1, 3, 1, '请联系管理员', '>', 95, 1, 1, 60, 'NodeManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (581, 'NodeManager老年代GC持续时间[5m]', 'YARN', 'avg_over_time(Hadoop_NodeManager_GcTimeMillisPS_MarkSweep{job=\"nodemanager\"}[5m])/(1000)', 1, 3, 1, '请联系管理员', '>', 60, 1, 1, 60, 'NodeManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (582, 'NodeManager新生代GC持续时间[5m]', 'YARN', 'avg_over_time(Hadoop_NodeManager_GcTimeMillisPS_Scavenge{job=\"nodemanager\"}[5m])/(1000)', 1, 3, 1, '请联系管理员', '>', 60, 1, 1, 60, 'NodeManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (583, 'NodeManagerGC持续时间[5m]', 'YARN', 'avg_over_time(Hadoop_NodeManager_GcTimeMillis{job=\"nodemanager\"}[5m])/(1000)', 1, 3, 1, '请联系管理员', '>', 60, 1, 1, 60, 'NodeManager', 1, '2022-07-14 14:22:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (611, 'KafkaBorker进程存活', 'KAFKA', 'up{job=\"kafkabroker\"}', 2, 10, 1, 'KafkaBroker宕机,请重新启动', '!=', 1, 1, 1, 15, 'KafkaBroker', 1, '2022-07-15 14:32:25');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (612, 'TrinoCoordinator进程存活', 'TRINO', 'up{job=\"trinocoordinator\"}', 2, 17, 1, '重新启动', '!=', 1, 1, 1, 15, 'TrinoCoordinator', 1, NULL);
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (613, 'TrinoWorker进程存活', 'TRINO', 'up{job=\"trinoworker\"}', 2, 17, 1, '重新启动', '!=', 1, 1, 1, 15, 'TrinoWorker', 1, NULL);
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (614, '主机采集器Node状态', 'NODE', 'up{job=\"node\"}', 1, 11, 1, '重新启动该Node服务', '!=', 1, 1, 1, 0, 'node', 1, NULL);
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (615, 'FE进程存活', 'STARROCKS', 'up{group=\'fe\',job=\"StarRocks\"}', 2, 19, 1, '重新启动', '!=', 1, 1, 1, 15, 'SRFE', 1, '2022-09-13 14:54:39');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (616, 'BE进程存活', 'STARROCKS', 'up{group=\'be\',job=\"StarRocks\"}', 2, 19, 1, '重新启动', '!=', 1, 1, 1, 15, 'SRBE', 1, '2022-09-13 14:55:16');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (617, 'SparkMaster进程存活', 'SPARK3', 'up{job=\"sparkmaster\"}', 2, 16, 1, '重新启动', '!=', 1, 1, 1, 15, 'SparkMaster', 1, '2022-09-16 10:24:38');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (618, 'SparkWorker进程存活', 'SPARK3', 'up{job=\"sparkworker\"}', 2, 16, 1, '重新启动', '!=', 1, 1, 1, 15, 'SparkWorker', 1, '2022-09-16 10:25:18');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (619, 'ElasticSearch进程存活', 'ELASTICSEARCH', 'com_datasophon_ddh_worker_metrics_esMetrics_EsUp', 2, 20, 1, '重新启动', '!=', 1, 1, 1, 15, 'ElasticSearch', 1, '2022-10-08 16:17:00');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (620, 'DS API存活', 'DS', 'up{job=\'apiserver\'}', 2, 21, 1, '重新启动', '!=', 1, 1, 1, 15, 'ApiServer', 1, '2022-11-20 21:00:54');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (621, 'DSMaster存活', 'DS', 'up{job=\'masterserver\'}', 2, 21, 1, '重新启动', '!=', 1, 1, 1, 15, 'MasterServer', 1, '2022-11-20 21:01:33');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (622, 'DSWorker存活', 'DS', 'up{job=\'workerserver\'}', 2, 21, 1, '重新启动', '!=', 1, 1, 1, 15, 'WorkerServer', 1, '2022-11-20 21:02:10');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (623, 'DSAlert存活', 'DS', 'up{job=\'alertserver\'}', 2, 21, 1, '重新启动', '!=', 1, 1, 1, 15, 'AlertServer', 1, '2022-11-20 21:02:46');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (624, 'StreamPark存活', 'STREAMPARK', 'up{job=\'streampark\'}', 2, 22, 1, '重新启动', '!=', 1, 1, 1, 15, 'StreamPark', 1, '2022-11-21 18:20:51');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (625, 'FE进程存活', 'DORIS', 'up{group=\"fe\",job=\"doris\"}', 2, 23, 1, 'restart', '!=', 1, 1, 1, 15, 'DorisFE', 1, '2023-01-07 22:21:36');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (626, 'BE进程存活', 'DORIS', 'up{group=\"be\",job=\"doris\"}', 2, 23, 1, 'restart', '!=', 1, 1, 1, 15, 'DorisBE', 1, '2023-01-07 22:22:10');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (627, 'HistoryServer进程存活', 'YARN', 'up{job=\"historyserver\"}', 2, 3, 1, '重新启动', '!=', 1, 1, 1, 15, 'HistoryServer', 1, '2023-01-12 14:20:23');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (628, 'DataSophonWorker存活', 'NODE', 'up{job=\'worker\'}', 2, 11, 1, '使用service datasophon-worker start命令启动worker', '!=', 1, 1, 1, 15, 'node', 1, '2023-04-17 21:10:01');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (629, 'Pulsar Broker存活', 'PULSAR', 'up{job=\"pulsarbroker\"}', 2, 24, 1, '重新启动', '!=', 1, 1, 1, 15, 'PulsarBroker', 1, '2023-06-19 10:51:17');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (630, 'Pulsar Bookie进程存活', 'PULSAR', 'up{job=\"bookie\"}', 2, 24, 1, '重新启动', '!=', 1, 1, 1, 15, 'Bookie', 1, '2023-06-19 11:17:07');
-INSERT INTO `t_ddh_cluster_alert_quota` VALUES (631, '存在NodeManager Unhealthy节点', 'YARN', 'Hadoop_ResourceManager_NumUnhealthyNMs', 1, 3, 1, '打开Yarn Web Ui 查看Unhealthy NodeManager节点', '>', 0, 1, 1, 15, 'NodeManager', 1, '2023-06-26 17:12:47');
-
-
-
-
--- ----------------------------
--- Records of t_ddh_cluster_alert_rule
--- ----------------------------
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101001, 101001, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', '主机内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101002, 101002, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', '主机CPU使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101003, 101003, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', '主机CPU系统使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101004, 101004, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', '主机CPU用户使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101005, 101005, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', '主机磁盘IO使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101006, 101006, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', '主机交换分区使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101007, 101007, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', '主机磁盘使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101008, 101008, 'TRUE', '>', '8589934592', 60, 'REPEAT', 30, 'WARN', '主机入网带宽', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101009, 101009, 'TRUE', '>', '8589934592', 60, 'REPEAT', 30, 'WARN', '主机出网带宽', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101010, 101010, 'TRUE', '>', '100', 60, 'REPEAT', 30, 'WARN', '系统平均负载[1m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101011, 101011, 'TRUE', '>', '100', 60, 'REPEAT', 30, 'WARN', '系统平均负载[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101012, 101012, 'TRUE', '>', '100', 60, 'REPEAT', 30, 'WARN', '系统平均负载[15m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101013, 101013, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'Ntp服务存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (101014, 101014, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'Ntp时间同步', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (102001, 102001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'AlertManager进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (103001, 103001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'Elasticsearch进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (103002, 103002, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'ElasticsearchCPU使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (103003, 103003, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'Elasticsearch内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (103004, 103004, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'Elasticsearch磁盘使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (104001, 104001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'FlinkHistoryServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (105001, 105001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'Grafana进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (106001, 106001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'HBaseMaster进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (106002, 106002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'HRegionServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (106003, 106003, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'HThriftServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107001, 107001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'NameNode进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107002, 107002, 'TRUE', '>', '5', 60, 'REPEAT', 30, 'WARN', 'NameNodeRPC延迟[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107003, 107003, 'TRUE', '>', '5', 60, 'REPEAT', 30, 'WARN', 'NameNodeRPC延迟[15m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107004, 107004, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'NameNode堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107005, 107005, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'NameNode老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107006, 107006, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'NameNode新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107007, 107007, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'NameNodeGC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107008, 107008, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'DataNode进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107009, 107009, 'TRUE', '>', '5', 60, 'REPEAT', 30, 'WARN', 'DataNodeRPC[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107010, 107010, 'TRUE', '>', '5', 60, 'REPEAT', 30, 'WARN', 'DataNodeRPC[15m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107011, 107011, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'DataNode堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107012, 107012, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'DataNode老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107013, 107013, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'DataNode新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107014, 107014, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'DataNodeGC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107015, 107015, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'JournalNode进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107016, 107016, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'ZKFailoverController进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107017, 107017, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'HttpFs进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107018, 107018, 'TRUE', '>', '0', 60, 'REPEAT', 30, 'WARN', 'HDFS坏盘', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (107019, 107019, 'TRUE', '>', '0', 60, 'REPEAT', 30, 'WARN', 'HDFS块丢失', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108001, 108001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'HiveServer2进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108002, 108002, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'HiveServer2堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108003, 108003, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'HiveServer2老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108004, 108004, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'HiveServer2新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108005, 108005, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'HiveMetastore进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108006, 108006, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'HiveMetastore堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108007, 108007, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'HiveMetastore老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108008, 108008, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'HiveMetastore新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (108009, 108009, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'MySQL进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (109001, 109001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'HueServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (110001, 110001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'InfluxDB进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (111001, 111001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'KafkaEagle进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (112001, 112001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'Kibana进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (113001, 113001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'KylinServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (113002, 113002, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'KylinServer堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (113003, 113003, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'KylinServer老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (113004, 113004, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'KylinServer新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (114001, 114001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'LivyServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (114002, 114002, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'LivyServer堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (114003, 114003, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'LivyServer老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (114004, 114004, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'LivyServer新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (115001, 115001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'NodeExporter进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (116001, 116001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'OozieServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (116002, 116002, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'OozieServer堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (116003, 116003, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'OozieServer老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (116004, 116004, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'OozieServer新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (117001, 117001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'Prometheus进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (118001, 118001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'RangerServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (119001, 119001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'SparkHistoryServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (120001, 120001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'TezUI进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (121001, 121001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'MonitorAgent进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (122001, 122001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'ZkUI进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (123001, 123001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'QuarumPeermain进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124001, 124001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'ResourceManager进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124002, 124002, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'ResourceManager堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124003, 124003, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'ResourceManager老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124004, 124004, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'ResourceManager新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124005, 124005, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'ResourceManagerGC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124006, 124006, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'NodeManager进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124007, 124007, 'TRUE', '>', '95', 60, 'REPEAT', 30, 'WARN', 'NodeManager堆内存使用率', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124008, 124008, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'NodeManager老年代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124009, 124009, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'NodeManager新生代GC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (124010, 124010, 'TRUE', '>', '60', 60, 'REPEAT', 30, 'WARN', 'NodeManagerGC持续时间[5m]', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (125001, 125001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'PrestoCoordinator进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (125002, 125002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'PrestoWorker进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (126001, 126001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'UdsMaster进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (126002, 126002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'UdsWorker进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (126003, 126003, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'UdsWeb进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (127001, 127001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'KuduMaster进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (127002, 127002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'KuduTserver进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (128001, 128001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'ImpalaImpalad进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (128002, 128002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'ImpalaCatalog进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (128003, 128003, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'ImpalaStatestored进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (129001, 129001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'ZeppelinServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (130001, 130001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'AirflowWebserver进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (130002, 130002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'AirflowScheduler进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (131001, 131001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'AtlasIndexServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (131002, 131002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'AtlasServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (132001, 132001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'AlertServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (132002, 132002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'ApiServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (132003, 132003, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'LoggerServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (132004, 132004, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'MasterServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (132005, 132005, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'WorkerServer进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (133001, 133001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'TrinoCoordinator进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (133002, 133002, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'TrinoWorker进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-INSERT INTO `t_ddh_cluster_alert_rule` VALUES (134001, 134001, 'TRUE', '!=', '1', 60, 'REPEAT', 30, 'WARN', 'Neo4j进程存活', NULL, 'VALID', 'FALSE', NULL, NULL, NULL);
-
-
-
--- ----------------------------
--- Records of t_ddh_cluster_service_dashboard
--- ----------------------------
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (1, 'HDFS', 'http://${grafanaHost}:3000/d/huM_B3dZz/2-hdfs?orgId=1&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (2, 'YARN', 'http://${grafanaHost}:3000/d/-ZErfqOWz/3-yarn?orgId=1&refresh=30s&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (3, 'HIVE', 'http://${grafanaHost}:3000/d/WYNeBqdZz/5-hive?orgId=1&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (4, 'HBASE', 'http://${grafanaHost}:3000/d/_S8XBqOWz/4-hbase?orgId=1&refresh=30s&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (5, 'KAFKA', 'http://${grafanaHost}:3000/d/DGHHkJKWk/6-kafka?orgId=1&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (6, 'ZOOKEEPER', 'http://${grafanaHost}:3000/d/000000261/8-zookeeper?orgId=1&refresh=1m&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (7, 'RANGER', 'http://${grafanaHost}:3000/d/qgVDEd3nk/ranger?orgId=1&refresh=30s&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (8, 'PROMETHEUS', 'http://${grafanaHost}:3000/d/dd4t3A6nz/prometheus-2-0-overview?orgId=1&refresh=30s&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (9, 'GRAFANA', 'http://${grafanaHost}:3000/d/eea-11_sik/grafana?orgId=1&refresh=5m&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (10, 'ALERTMANAGER', 'http://${grafanaHost}:3000/d/eea-9_siks/alertmanager?orgId=1&refresh=5m&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (11, 'SPARK3', 'http://${grafanaHost}:3000/d/rCUqf3dWz/7-spark?orgId=1&from=now-30m&to=now&refresh=5m&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (12, 'TOTAL', 'http://${grafanaHost}:3000/d/_4gf-qOZz/1-zong-lan?orgId=1&refresh=30s&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (13, 'TRINO', 'http://${grafanaHost}:3000/d/TGzKne5Wk/trino?orgId=1&refresh=30s&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (14, 'STARROCKS', 'http://${grafanaHost}:3000/d/wpcA3tG7z/starrocks?orgId=1&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (15, 'FLINK', 'http://${grafanaHost}:3000/d/-0rFuzoZk/flink-dashboard?orgId=1&refresh=30s&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (16, 'ELASTICSEARCH', 'http://${grafanaHost}:3000/d/3788af4adc3046dd92b3af31d0150c79/elasticsearch-cluster?orgId=1&refresh=5m&var-cluster=ddp_es&var-name=All&var-interval=5m&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (17, 'DS', 'http://${grafanaHost}:3000/d/X_NPpJOVk/dolphinscheduler?refresh=1m&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (18, 'STREAMPARK', 'http://${grafanaHost}:3000/d/98U0T1OVz/streampark?kiosk&refresh=1m');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (19, 'DINKY', 'http://${grafanaHost}:3000/d/9qU9T1OVk/dinky?kiosk&refresh=1m');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (20, 'DORIS', 'http://${grafanaHost}:3000/d/1fFiWJ4mz/doris-overview?orgId=1&from=now-6h&to=now&refresh=1m&kiosk');
-INSERT INTO `t_ddh_cluster_service_dashboard` VALUES (21, 'KERBEROS', 'http://${grafanaHost}:3000/d/QflaxlA4k/kerberos?orgId=1&refresh=1m&kiosk');
-
-
-
--- ----------------------------
--- Records of t_ddh_install_step
--- ----------------------------
-INSERT INTO `t_ddh_install_step` VALUES (1, '安装主机', NULL, 1);
-INSERT INTO `t_ddh_install_step` VALUES (2, '主机环境校验', NULL, 1);
-INSERT INTO `t_ddh_install_step` VALUES (3, '分发安装启动主机agent', NULL, 1);
-INSERT INTO `t_ddh_install_step` VALUES (4, '选择服务', NULL, 1);
-INSERT INTO `t_ddh_install_step` VALUES (5, '分配服务Master角色', NULL, 1);
-INSERT INTO `t_ddh_install_step` VALUES (6, '分配服务Worker与Client角色', NULL, 1);
-INSERT INTO `t_ddh_install_step` VALUES (7, '服务配置', NULL, 1);
-INSERT INTO `t_ddh_install_step` VALUES (8, '服务安装总览', NULL, 1);
-INSERT INTO `t_ddh_install_step` VALUES (9, '服务安装启动', NULL, 1);
-
--- ----------------------------
--- Records of t_ddh_cluster_group
--- ----------------------------
-INSERT INTO `t_ddh_cluster_group` VALUES (1, 'hadoop', 1);
-INSERT INTO `t_ddh_cluster_group` VALUES (2, 'elastic', 1);
-
--- ----------------------------
--- Records of t_ddh_cluster_user
--- ----------------------------
-INSERT INTO `t_ddh_cluster_user` VALUES (1, 'hdfs', 1);
-INSERT INTO `t_ddh_cluster_user` VALUES (2, 'hive', 1);
-INSERT INTO `t_ddh_cluster_user` VALUES (3, 'yarn', 1);
-INSERT INTO `t_ddh_cluster_user` VALUES (4, 'mapred', 1);
-INSERT INTO `t_ddh_cluster_user` VALUES (5, 'elastic', 1);
-INSERT INTO `t_ddh_cluster_user` VALUES (6, 'hbase', 1);
-INSERT INTO `t_ddh_cluster_user` VALUES (9, 'admin', 1);
-
--- ----------------------------
--- Records of t_ddh_cluster_user_group
--- ----------------------------
-INSERT INTO `t_ddh_cluster_user_group` VALUES (1, 1, 1, 1, 1);
-INSERT INTO `t_ddh_cluster_user_group` VALUES (2, 2, 1, 1, 1);
-INSERT INTO `t_ddh_cluster_user_group` VALUES (3, 3, 1, 1, 1);
-INSERT INTO `t_ddh_cluster_user_group` VALUES (4, 4, 1, 1, 1);
-INSERT INTO `t_ddh_cluster_user_group` VALUES (5, 5, 2, 1, 1);
-INSERT INTO `t_ddh_cluster_user_group` VALUES (6, 6, 1, 1, 1);
-INSERT INTO `t_ddh_cluster_user_group` VALUES (11, 9, 1, 1, 1);
-
--- ----------------------------
--- Records of t_ddh_session
--- ----------------------------
-INSERT INTO `t_ddh_session` VALUES ('3f229c41-84ee-4a09-a0b9-76e95f0577dc', 2, '192.168.75.12', '2022-09-07 11:52:12');
-INSERT INTO `t_ddh_session` VALUES ('d25dd005-ceb6-4414-bfdf-9279a23c2ba6', 1, '192.168.75.12', '2023-02-12 20:34:57');
-
-
-
--- ----------------------------
--- Records of t_ddh_user_info
--- ----------------------------
-INSERT INTO `t_ddh_user_info` VALUES (1, 'admin', '0192023a7bbd73250516f069df18b500', 'xxx@163.com', '1865xx', '2022-05-10 16:05:18', 1);
diff --git a/datasophon-service/src/main/java/com/datasophon/api/enums/Status.java b/datasophon-service/src/main/java/com/datasophon/api/enums/Status.java
index 6d5254ca..baab38ab 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/enums/Status.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/enums/Status.java
@@ -86,7 +86,8 @@ public enum Status {
"All instances of the same service on the same machine need to be within the same role group",
"同一个服务在同一台机器上的所有实例需要在同一个角色组内"),
ODD_NUMBER_ARE_REQUIRED_FOR_DORISFE(10040, "The Number of DorisFE must be an odd number.", "DorisFE个数必须是奇数"),
- NO_SERVICE_ROLE_SELECTED(10041, "No service role selected", "未选择需要安装的服务实例");
+ NO_SERVICE_ROLE_SELECTED(10041, "No service role selected", "未选择需要安装的服务实例"),
+ TWO_KYUUBISERVERS_NEED_TO_BE_DEPLOYED(10042, "two kyuubiServer deployments are required", "KyuubiServer需要两个节点");
private final int code;
diff --git a/datasophon-service/src/main/java/com/datasophon/api/master/WorkerStartActor.java b/datasophon-service/src/main/java/com/datasophon/api/master/WorkerStartActor.java
index 302bc4a6..1a11a8e4 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/master/WorkerStartActor.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/master/WorkerStartActor.java
@@ -47,6 +47,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.datasophon.dao.enums.ServiceRoleState;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -108,28 +109,45 @@ public void onReceive(Object message) throws Throwable {
prometheusActor.tell(prometheusConfigCommand, getSelf());
// tell to worker what need to start
- autoStartServiceNeeded(msg.getHostname(), cluster.getId());
+ autoAddServiceOperatorNeeded(msg.getHostname(), cluster.getId(), CommandType.START_SERVICE, false);
} else if(message instanceof WorkerServiceMessage) {
- // 启动节点上安装的服务
WorkerServiceMessage msg = (WorkerServiceMessage) message;
- // tell to worker what need to start
- autoStartServiceNeeded(msg.getHostname(), msg.getClusterId());
+ // tell to worker what need to start/stop
+ autoAddServiceOperatorNeeded(msg.getHostname(), msg.getClusterId(), msg.getCommandType(), true);
}
}
/**
- * Automatically start services that need to be started
+ * Automatically start/stop services on the given host that need to be started/stopped
*
* @param clusterId
*/
- private void autoStartServiceNeeded(String hostname, Integer clusterId) {
+ private void autoAddServiceOperatorNeeded(String hostname, Integer clusterId, CommandType commandType,
+ boolean needRestart) {
ClusterServiceRoleInstanceService roleInstanceService =
SpringTool.getApplicationContext().getBean(ClusterServiceRoleInstanceService.class);
ClusterServiceCommandService serviceCommandService =
SpringTool.getApplicationContext().getBean(ClusterServiceCommandService.class);
- List serviceRoleList =
- roleInstanceService.listStoppedServiceRoleListByHostnameAndClusterId(hostname, clusterId);
+ List serviceRoleList = null;
+ // 启动服务
+ if (CommandType.START_SERVICE.equals(commandType)) {
+ serviceRoleList = roleInstanceService
+ .listStoppedServiceRoleListByHostnameAndClusterId(hostname, clusterId);
+ // 重启时重刷服务配置以支持磁盘故障等问题
+ if(needRestart){
+ roleInstanceService.updateToNeedRestartByHost(hostname);
+ }
+ }
+
+ // 停止运行状态的服务
+ if (CommandType.STOP_SERVICE.equals(commandType)) {
+ serviceRoleList = roleInstanceService
+ .getServiceRoleListByHostnameAndClusterId(hostname, clusterId).stream()
+ .filter(roleInstance -> (!ServiceRoleState.STOP.equals(roleInstance.getServiceRoleState()) &&
+ !ServiceRoleState.DECOMMISSIONED.equals(roleInstance.getServiceRoleState()))).collect(toList());
+ }
+
if (CollectionUtils.isEmpty(serviceRoleList)) {
logger.info("No services need to start at host {}.", hostname);
return;
@@ -141,7 +159,7 @@ private void autoStartServiceNeeded(String hostname, Integer clusterId) {
ClusterServiceRoleInstanceEntity::getServiceId,
mapping(i -> String.valueOf(i.getId()), toList())));
Result result =
- serviceCommandService.generateServiceRoleCommands(clusterId, CommandType.START_SERVICE, serviceRoleMap);
+ serviceCommandService.generateServiceRoleCommands(clusterId, commandType, serviceRoleMap);
if (result.getCode() == 200) {
logger.info("Auto-start services successful");
} else {
diff --git a/datasophon-service/src/main/java/com/datasophon/api/master/handler/service/ServiceInstallHandler.java b/datasophon-service/src/main/java/com/datasophon/api/master/handler/service/ServiceInstallHandler.java
index cc8fb437..b2439bc9 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/master/handler/service/ServiceInstallHandler.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/master/handler/service/ServiceInstallHandler.java
@@ -65,22 +65,25 @@ public ExecResult handlerRequest(ServiceRoleInfo serviceRoleInfo) throws Excepti
installServiceRoleCommand.setServiceName(serviceRoleInfo.getParentName());
installServiceRoleCommand.setServiceRoleName(serviceRoleInfo.getName());
installServiceRoleCommand.setServiceRoleType(serviceRoleInfo.getRoleType());
- installServiceRoleCommand.setPackageName(serviceRoleInfo.getPackageName());
installServiceRoleCommand.setDecompressPackageName(serviceRoleInfo.getDecompressPackageName());
installServiceRoleCommand.setRunAs(serviceRoleInfo.getRunAs());
installServiceRoleCommand.setServiceRoleType(serviceRoleInfo.getRoleType());
- String md5 = FileUtil.readString(
- Constants.MASTER_MANAGE_PACKAGE_PATH + Constants.SLASH + serviceRoleInfo.getPackageName() + ".md5",
- Charset.defaultCharset());
- installServiceRoleCommand.setPackageMd5(md5);
+
+ String md5;
if ("aarch64".equals(hostEntity.getCpuArchitecture()) && FileUtil.exist(Constants.MASTER_MANAGE_PACKAGE_PATH
+ Constants.SLASH + serviceRoleInfo.getDecompressPackageName() + "-arm.tar.gz")) {
installServiceRoleCommand.setPackageName(serviceRoleInfo.getDecompressPackageName() + "-arm.tar.gz");
logger.info("find arm package {}", installServiceRoleCommand.getPackageName());
- String armMd5 = FileUtil.readString(Constants.MASTER_MANAGE_PACKAGE_PATH + Constants.SLASH
+ md5 = FileUtil.readString(Constants.MASTER_MANAGE_PACKAGE_PATH + Constants.SLASH
+ serviceRoleInfo.getDecompressPackageName() + "-arm.tar.gz.md5", Charset.defaultCharset());
- installServiceRoleCommand.setPackageMd5(armMd5);
+ } else {
+ installServiceRoleCommand.setPackageName(serviceRoleInfo.getPackageName());
+ md5 = FileUtil.readString(
+ Constants.MASTER_MANAGE_PACKAGE_PATH + Constants.SLASH + serviceRoleInfo.getPackageName() + ".md5",
+ Charset.defaultCharset());
}
+ installServiceRoleCommand.setPackageMd5(md5);
+
ActorSelection actorSelection = ActorUtils.actorSystem.actorSelection(
"akka.tcp://datasophon@" + serviceRoleInfo.getHostname() + ":2552/user/worker/installServiceActor");
Timeout timeout = new Timeout(Duration.create(180, TimeUnit.SECONDS));
diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/ClusterServiceRoleInstanceService.java b/datasophon-service/src/main/java/com/datasophon/api/service/ClusterServiceRoleInstanceService.java
index 68e20492..d8335ded 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/service/ClusterServiceRoleInstanceService.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/service/ClusterServiceRoleInstanceService.java
@@ -65,6 +65,8 @@ List getServiceRoleInstanceListByClusterIdAndR
void updateToNeedRestart(Integer roleGroupId);
+ void updateToNeedRestartByHost(String hostName);
+
List getObsoleteService(Integer id);
List getStoppedRoleInstanceOnHost(Integer clusterId, String hostname,
diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/InstallService.java b/datasophon-service/src/main/java/com/datasophon/api/service/InstallService.java
index 9a996c09..2b65bf29 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/service/InstallService.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/service/InstallService.java
@@ -44,7 +44,7 @@ Result analysisHostList(Integer clusterId, String hosts, String sshUser, Integer
/**
- * 启动 主机上安装的服务启动
+ * 启动/停止 主机上安装的服务
* @param clusterHostIds
* @param commandType
* @return
diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/host/impl/ClusterHostServiceImpl.java b/datasophon-service/src/main/java/com/datasophon/api/service/host/impl/ClusterHostServiceImpl.java
index c5398fa6..5a3d0e2b 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/service/host/impl/ClusterHostServiceImpl.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/service/host/impl/ClusterHostServiceImpl.java
@@ -26,6 +26,7 @@
import com.datasophon.api.master.ActorUtils;
import com.datasophon.api.master.PrometheusActor;
import com.datasophon.api.master.RackActor;
+import com.datasophon.api.service.ClusterRackService;
import com.datasophon.api.service.host.ClusterHostService;
import com.datasophon.api.service.ClusterInfoService;
import com.datasophon.api.service.ClusterServiceRoleInstanceService;
@@ -39,6 +40,7 @@
import com.datasophon.common.utils.Result;
import com.datasophon.dao.entity.ClusterHostDO;
import com.datasophon.dao.entity.ClusterInfoEntity;
+import com.datasophon.dao.entity.ClusterRack;
import com.datasophon.dao.entity.ClusterServiceRoleInstanceEntity;
import com.datasophon.domain.host.enums.HostState;
import com.datasophon.dao.enums.RoleType;
@@ -74,6 +76,9 @@ public class ClusterHostServiceImpl extends ServiceImpl rackMap = clusterRackService.queryClusterRack(clusterId).stream()
+ .collect(Collectors.toMap(obj->obj.getId()+"", ClusterRack::getRack));
for (ClusterHostDO clusterHostDO : list) {
QueryHostListPageDTO queryHostListPageDTO = new QueryHostListPageDTO();
BeanUtils.copyProperties(clusterHostDO,queryHostListPageDTO);
@@ -104,6 +113,7 @@ public Result listByPage(Integer clusterId, String hostname, String ip, String c
.eq(Constants.HOSTNAME, clusterHostDO.getHostname()));
queryHostListPageDTO.setServiceRoleNum(serviceRoleNum);
queryHostListPageDTO.setHostState(clusterHostDO.getHostState().getValue());
+ queryHostListPageDTO.setRack(rackMap.getOrDefault(queryHostListPageDTO.getRack(),"/default-rack"));
hostListPageDTOS.add(queryHostListPageDTO);
}
int count = this.count(new QueryWrapper().eq(Constants.CLUSTER_ID, clusterId)
diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/impl/ClusterServiceRoleInstanceServiceImpl.java b/datasophon-service/src/main/java/com/datasophon/api/service/impl/ClusterServiceRoleInstanceServiceImpl.java
index cc81382f..13d1345a 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/service/impl/ClusterServiceRoleInstanceServiceImpl.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/service/impl/ClusterServiceRoleInstanceServiceImpl.java
@@ -315,6 +315,11 @@ public void updateToNeedRestart(Integer roleGroupId) {
roleInstanceMapper.updateToNeedRestart(roleGroupId);
}
+ @Override
+ public void updateToNeedRestartByHost(String hostName) {
+ roleInstanceMapper.updateToNeedRestartByHost(hostName);
+ }
+
@Override
public List getObsoleteService(Integer serviceInstanceId) {
return this.lambdaQuery()
diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/impl/FrameInfoServiceImpl.java b/datasophon-service/src/main/java/com/datasophon/api/service/impl/FrameInfoServiceImpl.java
index 76d09436..92a1a862 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/service/impl/FrameInfoServiceImpl.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/service/impl/FrameInfoServiceImpl.java
@@ -48,6 +48,7 @@ public Result getAllClusterFrame() {
Set frameInfoIds = frameInfoEntities.stream().map(FrameInfoEntity::getId).collect(Collectors.toSet());
Map> frameServiceGroupBys = frameServiceService.lambdaQuery()
+ .select(FrameServiceEntity::getId, FrameServiceEntity::getFrameId, FrameServiceEntity::getFrameCode, FrameServiceEntity::getServiceName, FrameServiceEntity::getServiceVersion, FrameServiceEntity::getServiceDesc)
.in(FrameServiceEntity::getFrameId, frameInfoIds)
.list()
.stream()
diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/impl/InstallServiceImpl.java b/datasophon-service/src/main/java/com/datasophon/api/service/impl/InstallServiceImpl.java
index 09d60a8a..74e515cc 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/service/impl/InstallServiceImpl.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/service/impl/InstallServiceImpl.java
@@ -41,6 +41,7 @@
import com.datasophon.common.cache.CacheUtils;
import com.datasophon.common.command.DispatcherHostAgentCommand;
import com.datasophon.common.command.HostCheckCommand;
+import com.datasophon.common.enums.CommandType;
import com.datasophon.common.enums.InstallState;
import com.datasophon.common.model.CheckResult;
import com.datasophon.common.model.HostInfo;
@@ -451,9 +452,11 @@ public Result generateHostServiceCommand(String clusterHostIds, String commandTy
String[] clusterHostIdArray = clusterHostIds.split(Constants.COMMA);
List clusterHostList = hostService.getHostListByIds(Arrays.asList(clusterHostIdArray));
Result result = null;
+
+ CommandType serviceCommandType = "start".equalsIgnoreCase(commandType) ? CommandType.START_SERVICE : CommandType.STOP_SERVICE;
for (ClusterHostDO clusterHostDO : clusterHostList) {
WorkerServiceMessage serviceMessage = new WorkerServiceMessage(
- clusterHostDO.getHostname(), clusterHostDO.getClusterId());
+ clusterHostDO.getHostname(), clusterHostDO.getClusterId(), serviceCommandType);
try {
ActorRef actor =
ActorUtils.getLocalActor(WorkerStartActor.class, "workerStartActor");
diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/impl/ServiceInstallServiceImpl.java b/datasophon-service/src/main/java/com/datasophon/api/service/impl/ServiceInstallServiceImpl.java
index 5e031171..5b00bb93 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/service/impl/ServiceInstallServiceImpl.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/service/impl/ServiceInstallServiceImpl.java
@@ -706,6 +706,9 @@ private void serviceValidation(ServiceRoleHostMapping serviceRoleHostMapping) {
if ("DorisFE".equals(serviceRole) && (hosts.size() & 1) == 0) {
throw new ServiceException(Status.ODD_NUMBER_ARE_REQUIRED_FOR_DORISFE.getMsg());
}
+ if ("KyuubiServer".equals(serviceRole) && hosts.size() != 2) {
+ throw new ServiceException(Status.TWO_KYUUBISERVERS_NEED_TO_BE_DEPLOYED.getMsg());
+ }
}
private List listServiceConfigByServiceInstance(
diff --git a/datasophon-service/src/main/java/com/datasophon/api/strategy/KyuubiServerHandlerStrategy.java b/datasophon-service/src/main/java/com/datasophon/api/strategy/KyuubiServerHandlerStrategy.java
new file mode 100644
index 00000000..9486152e
--- /dev/null
+++ b/datasophon-service/src/main/java/com/datasophon/api/strategy/KyuubiServerHandlerStrategy.java
@@ -0,0 +1,89 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.datasophon.api.strategy;
+
+import com.datasophon.api.load.GlobalVariables;
+import com.datasophon.api.load.ServiceConfigMap;
+import com.datasophon.api.utils.ProcessUtils;
+import com.datasophon.common.Constants;
+import com.datasophon.common.model.ServiceConfig;
+import com.datasophon.common.model.ServiceRoleInfo;
+import com.datasophon.dao.entity.ClusterInfoEntity;
+import com.datasophon.dao.entity.ClusterServiceRoleInstanceEntity;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class KyuubiServerHandlerStrategy extends ServiceHandlerAbstract implements ServiceRoleStrategy{
+
+ private static final Logger logger = LoggerFactory.getLogger(KyuubiServerHandlerStrategy.class);
+ private static final String ENABLE_KERBEROS = "enableKerberos";
+
+ @Override
+ public void handler(Integer clusterId, List hosts) {
+
+ }
+
+ @Override
+ public void handlerConfig(Integer clusterId, List list) {
+
+ Map globalVariables = GlobalVariables.get(clusterId);
+ boolean enableKerberos = false;
+ Map map = ProcessUtils.translateToMap(list);
+ ClusterInfoEntity clusterInfo = ProcessUtils.getClusterInfo(clusterId);
+ // todo: 判断kerberos的逻辑应该抽取到公共方法中
+ for (ServiceConfig config : list) {
+ if (ENABLE_KERBEROS.equals(config.getName())) {
+ enableKerberos =
+ isEnableKerberos(
+ clusterId, globalVariables, enableKerberos, config, "KYUUBI");
+ }
+ }
+ String key = clusterInfo.getClusterFrame() + Constants.UNDERLINE + "KYUUBI" + Constants.CONFIG;
+ List configs = ServiceConfigMap.get(key);
+ ArrayList kbConfigs = new ArrayList<>();
+ if (enableKerberos) {
+ addConfigWithKerberos(globalVariables, map, configs, kbConfigs);
+ } else {
+ removeConfigWithKerberos(list, map, configs);
+ }
+ list.addAll(kbConfigs);
+ }
+
+ @Override
+ public void getConfig(Integer clusterId, List list) {
+
+ }
+
+ @Override
+ public void handlerServiceRoleInfo(ServiceRoleInfo serviceRoleInfo, String hostname) {
+
+ }
+
+ @Override
+ public void handlerServiceRoleCheck(ClusterServiceRoleInstanceEntity roleInstanceEntity,
+ Map map) {
+ }
+
+}
diff --git a/datasophon-service/src/main/java/com/datasophon/api/strategy/ServiceRoleStrategyContext.java b/datasophon-service/src/main/java/com/datasophon/api/strategy/ServiceRoleStrategyContext.java
index eba9235e..4c6d15e8 100644
--- a/datasophon-service/src/main/java/com/datasophon/api/strategy/ServiceRoleStrategyContext.java
+++ b/datasophon-service/src/main/java/com/datasophon/api/strategy/ServiceRoleStrategyContext.java
@@ -1,69 +1,70 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.datasophon.api.strategy;
-
-import org.apache.commons.lang.StringUtils;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-public class ServiceRoleStrategyContext {
-
- private static final Map map = new ConcurrentHashMap<>();
-
- static {
- map.put("NameNode", new NameNodeHandlerStrategy());
- map.put("ResourceManager", new RMHandlerStrategy());
- map.put("HiveMetaStore", new HiveMetaStroreHandlerStrategy());
- map.put("HiveServer2", new HiveServer2HandlerStrategy());
- map.put("Grafana", new GrafanaHandlerStrategy());
- map.put("ZkServer", new ZkServerHandlerStrategy());
- map.put("HistoryServer", new HistoryServerHandlerStrategy());
- map.put("TimelineServer", new TSHandlerStrategy());
- map.put("TrinoCoordinator", new TrinoHandlerStrategy());
- map.put("JournalNode", new JournalNodeHandlerStrategy());
- map.put("ZKFC", new ZKFCHandlerStrategy());
- map.put("SRFE", new FEHandlerStartegy());
- map.put("DorisFE", new FEHandlerStartegy());
- map.put("DorisFEObserver", new FEObserverHandlerStartegy());
- map.put("SRBE", new BEHandlerStartegy());
- map.put("DorisBE", new BEHandlerStartegy());
- map.put("Krb5Kdc", new Krb5KdcHandlerStrategy());
- map.put("KAdmin", new KAdminHandlerStrategy());
- map.put("RangerAdmin", new RangerAdminHandlerStrategy());
- map.put("ElasticSearch", new ElasticSearchHandlerStrategy());
- map.put("Prometheus", new PrometheusHandlerStrategy());
- map.put("AlertManager", new AlertManagerHandlerStrategy());
-
- map.put("RANGER", new RangerAdminHandlerStrategy());
- map.put("ZOOKEEPER", new ZkServerHandlerStrategy());
- map.put("YARN", new RMHandlerStrategy());
- map.put("HDFS", new NameNodeHandlerStrategy());
- map.put("HIVE", new HiveServer2HandlerStrategy());
- map.put("KAFKA", new KafkaHandlerStrategy());
- map.put("HBASE", new HBaseHandlerStrategy());
- map.put("FLINK", new FlinkHandlerStrategy());
- }
-
- public static ServiceRoleStrategy getServiceRoleHandler(String type) {
- if (StringUtils.isBlank(type)) {
- return null;
- }
- return map.get(type);
- }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.datasophon.api.strategy;
+
+import org.apache.commons.lang.StringUtils;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class ServiceRoleStrategyContext {
+
+ private static final Map map = new ConcurrentHashMap<>();
+
+ static {
+ map.put("NameNode", new NameNodeHandlerStrategy());
+ map.put("ResourceManager", new RMHandlerStrategy());
+ map.put("HiveMetaStore", new HiveMetaStroreHandlerStrategy());
+ map.put("HiveServer2", new HiveServer2HandlerStrategy());
+ map.put("Grafana", new GrafanaHandlerStrategy());
+ map.put("ZkServer", new ZkServerHandlerStrategy());
+ map.put("HistoryServer", new HistoryServerHandlerStrategy());
+ map.put("TimelineServer", new TSHandlerStrategy());
+ map.put("TrinoCoordinator", new TrinoHandlerStrategy());
+ map.put("JournalNode", new JournalNodeHandlerStrategy());
+ map.put("ZKFC", new ZKFCHandlerStrategy());
+ map.put("SRFE", new FEHandlerStartegy());
+ map.put("DorisFE", new FEHandlerStartegy());
+ map.put("DorisFEObserver", new FEObserverHandlerStartegy());
+ map.put("SRBE", new BEHandlerStartegy());
+ map.put("DorisBE", new BEHandlerStartegy());
+ map.put("Krb5Kdc", new Krb5KdcHandlerStrategy());
+ map.put("KAdmin", new KAdminHandlerStrategy());
+ map.put("RangerAdmin", new RangerAdminHandlerStrategy());
+ map.put("ElasticSearch", new ElasticSearchHandlerStrategy());
+ map.put("Prometheus", new PrometheusHandlerStrategy());
+ map.put("AlertManager", new AlertManagerHandlerStrategy());
+
+ map.put("RANGER", new RangerAdminHandlerStrategy());
+ map.put("ZOOKEEPER", new ZkServerHandlerStrategy());
+ map.put("YARN", new RMHandlerStrategy());
+ map.put("HDFS", new NameNodeHandlerStrategy());
+ map.put("HIVE", new HiveServer2HandlerStrategy());
+ map.put("KAFKA", new KafkaHandlerStrategy());
+ map.put("HBASE", new HBaseHandlerStrategy());
+ map.put("FLINK", new FlinkHandlerStrategy());
+ map.put("KYUUBI", new KyuubiServerHandlerStrategy());
+ }
+
+ public static ServiceRoleStrategy getServiceRoleHandler(String type) {
+ if (StringUtils.isBlank(type)) {
+ return null;
+ }
+ return map.get(type);
+ }
+}
diff --git a/datasophon-ui/src/pages/hostManage/index.vue b/datasophon-ui/src/pages/hostManage/index.vue
index f0daa8d8..800c8e03 100644
--- a/datasophon-ui/src/pages/hostManage/index.vue
+++ b/datasophon-ui/src/pages/hostManage/index.vue
@@ -43,6 +43,7 @@
启动主机服务
+ 停止主机服务
启动主机Worker
停止主机Worker
重新安装Worker
@@ -482,6 +483,10 @@ export default {
this.doConfirm("启动该主机服务", this.handStartService);
return false;
}
+ if(key.key === "handStopService") {
+ this.doConfirm("停止该主机服务", this.handStopCommand);
+ return false;
+ }
if(key.key === "handStartHost") {
// 启动主机 Worker
this.doConfirm("启动该主机 Worker", this.handStartHost)
@@ -539,13 +544,19 @@ export default {
})
},
handStartService() {
+ this.handCommand('start')
+ },
+ handStopCommand() {
+ this.handCommand('stop')
+ },
+ handCommand(op){
let params = {
clusterHostIds: this.hostnames.join(","),
- commandType: "start",
+ commandType: op,
};
this.$axiosPost(global.API.generateHostServiceCommand, params).then((resp) => {
if (resp.code === 200) {
- this.$message.success("启动 Worker 服务成功");
+ this.$message.success(op === 'start' ? "启动 Worker 服务成功" : "停止 Worker 服务成功");
} else {
this.$message.error(resp.msg);
}
diff --git a/datasophon-worker/src/main/java/com/datasophon/worker/WorkerApplicationServer.java b/datasophon-worker/src/main/java/com/datasophon/worker/WorkerApplicationServer.java
index 479f73c9..61e8e559 100644
--- a/datasophon-worker/src/main/java/com/datasophon/worker/WorkerApplicationServer.java
+++ b/datasophon-worker/src/main/java/com/datasophon/worker/WorkerApplicationServer.java
@@ -108,6 +108,7 @@ private static void initUserMap(Map userMap) {
userMap.put("hive", HADOOP);
userMap.put("mapred", HADOOP);
userMap.put("hbase", HADOOP);
+ userMap.put("kyuubi", HADOOP);
userMap.put("elastic", "elastic");
}
diff --git a/datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java b/datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java
index 93d4728b..9dd588eb 100644
--- a/datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java
+++ b/datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java
@@ -135,6 +135,17 @@ public ExecResult configure(Map> cofigFileMap,
config.setName("priority_networks");
}
+ if ("KyuubiServer".equals(serviceRoleName) && "sparkHome".equals(config.getName())) {
+ // add hive-site.xml link in kerberos module
+ final String targetPath = Constants.INSTALL_PATH + File.separator + decompressPackageName+"/conf/hive-site.xml";
+ if(!FileUtil.exist(targetPath)){
+ logger.info("Add hive-site.xml link");
+ ExecResult result = ShellUtils.exceShell("ln -s "+config.getValue()+"/conf/hive-site.xml "+targetPath);
+ if(!result.getExecResult()){
+ logger.warn("Add hive-site.xml link failed,msg: "+result.getExecErrOut());
+ }
+ }
+ }
}
if (Objects.nonNull(myid) && StringUtils.isNotBlank(dataDir)) {
diff --git a/datasophon-worker/src/main/java/com/datasophon/worker/strategy/KyuubiServerHandlerStrategy.java b/datasophon-worker/src/main/java/com/datasophon/worker/strategy/KyuubiServerHandlerStrategy.java
new file mode 100644
index 00000000..cba3d4b0
--- /dev/null
+++ b/datasophon-worker/src/main/java/com/datasophon/worker/strategy/KyuubiServerHandlerStrategy.java
@@ -0,0 +1,45 @@
+package com.datasophon.worker.strategy;
+
+import cn.hutool.core.io.FileUtil;
+import com.datasophon.common.Constants;
+import com.datasophon.common.cache.CacheUtils;
+import com.datasophon.common.command.ServiceRoleOperateCommand;
+import com.datasophon.common.utils.ExecResult;
+import com.datasophon.worker.handler.ServiceHandler;
+import com.datasophon.worker.utils.KerberosUtils;
+import java.sql.SQLException;
+
+/**
+ * @author thomasgx
+ * @date 2023年10月20日 19:02
+ */
+public class KyuubiServerHandlerStrategy extends AbstractHandlerStrategy implements
+ ServiceRoleStrategy {
+
+ private static final String KEYTAB_NAME = "kyuubi.service.keytab";
+ private static final String KEYTAB_PATH = "/etc/security/keytab/" + KEYTAB_NAME;
+
+ public KyuubiServerHandlerStrategy(String serviceName, String serviceRoleName) {
+ super(serviceName, serviceRoleName);
+ }
+
+ @Override
+ public ExecResult handler(ServiceRoleOperateCommand command)
+ throws SQLException, ClassNotFoundException {
+ ExecResult startResult;
+ if (command.getEnableKerberos()) {
+ logger.info("start to get kyuubi keytab file");
+ String hostname = CacheUtils.getString(Constants.HOSTNAME);
+ KerberosUtils.createKeytabDir();
+ if (!FileUtil.exist(KEYTAB_PATH)) {
+ KerberosUtils.downloadKeytabFromMaster("kyuubi/" + hostname, KEYTAB_NAME);
+ }
+ }
+
+ ServiceHandler serviceHandler = new ServiceHandler(command.getServiceName(),
+ command.getServiceRoleName());
+ startResult = serviceHandler.start(command.getStartRunner(), command.getStatusRunner(),
+ command.getDecompressPackageName(), command.getRunAs());
+ return startResult;
+ }
+}
diff --git a/datasophon-worker/src/main/java/com/datasophon/worker/strategy/ServiceRoleStrategyContext.java b/datasophon-worker/src/main/java/com/datasophon/worker/strategy/ServiceRoleStrategyContext.java
index 5665e05d..a5a21911 100644
--- a/datasophon-worker/src/main/java/com/datasophon/worker/strategy/ServiceRoleStrategyContext.java
+++ b/datasophon-worker/src/main/java/com/datasophon/worker/strategy/ServiceRoleStrategyContext.java
@@ -1,61 +1,63 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.datasophon.worker.strategy;
-
-import org.apache.commons.lang.StringUtils;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-public class ServiceRoleStrategyContext {
-
- private static final Map map = new ConcurrentHashMap<>();
-
- static {
- map.put("NameNode", new NameNodeHandlerStrategy("HDFS", "NameNode"));
- map.put("ZKFC", new ZKFCHandlerStrategy("HDFS", "ZKFC"));
- map.put("JournalNode", new JournalNodeHandlerStrategy("HDFS", "JournalNode"));
- map.put("DataNode", new DataNodeHandlerStrategy("HDFS", "DataNode"));
- map.put("ResourceManager", new ResourceManagerHandlerStrategy("YARN", "ResourceManager"));
- map.put("NodeManager", new NodeManagerHandlerStrategy("YARN", "NodeManager"));
- map.put("RangerAdmin", new RangerAdminHandlerStrategy("RANGER", "RangerAdmin"));
- map.put("HiveServer2", new HiveServer2HandlerStrategy("HIVE", "HiveServer2"));
- map.put("HbaseMaster", new HbaseHandlerStrategy("HBASE", "HbaseMaster"));
- map.put("RegionServer", new HbaseHandlerStrategy("HBASE", "RegionServer"));
- map.put("Krb5Kdc", new Krb5KdcHandlerStrategy("KERBEROS", "Krb5Kdc"));
- map.put("KAdmin", new KAdminHandlerStrategy("KERBEROS", "KAdmin"));
- map.put("SRFE", new FEHandlerStrategy("STARROCKS", "SRFE"));
- map.put("DorisFE", new FEHandlerStrategy("DORIS", "DorisFE"));
- map.put("DorisFEObserver", new FEObserverHandlerStrategy("DORIS", "DorisFEObserver"));
- map.put("ZkServer", new ZkServerHandlerStrategy("ZOOKEEPER", "ZkServer"));
- map.put("KafkaBroker", new KafkaHandlerStrategy("KAFKA", "KafkaBroker"));
- map.put("SRBE", new BEHandlerStrategy("STARROCKS", "SRBE"));
- map.put("DorisBE", new BEHandlerStrategy("DORIS", "DorisBE"));
- map.put("HistoryServer", new HistoryServerHandlerStrategy("YARN", "HistoryServer"));
-
- // TEZ Server service
- map.put("TezServer", new TezServerHandlerStrategy("TEZ", "TezServer"));
- }
-
- public static ServiceRoleStrategy getServiceRoleHandler(String type) {
- if (StringUtils.isBlank(type)) {
- return null;
- }
- return map.get(type);
- }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.datasophon.worker.strategy;
+
+import org.apache.commons.lang.StringUtils;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class ServiceRoleStrategyContext {
+
+ private static final Map map = new ConcurrentHashMap<>();
+
+ static {
+ map.put("NameNode", new NameNodeHandlerStrategy("HDFS", "NameNode"));
+ map.put("ZKFC", new ZKFCHandlerStrategy("HDFS", "ZKFC"));
+ map.put("JournalNode", new JournalNodeHandlerStrategy("HDFS", "JournalNode"));
+ map.put("DataNode", new DataNodeHandlerStrategy("HDFS", "DataNode"));
+ map.put("ResourceManager", new ResourceManagerHandlerStrategy("YARN", "ResourceManager"));
+ map.put("NodeManager", new NodeManagerHandlerStrategy("YARN", "NodeManager"));
+ map.put("RangerAdmin", new RangerAdminHandlerStrategy("RANGER", "RangerAdmin"));
+ map.put("HiveServer2", new HiveServer2HandlerStrategy("HIVE", "HiveServer2"));
+ map.put("HbaseMaster", new HbaseHandlerStrategy("HBASE", "HbaseMaster"));
+ map.put("RegionServer", new HbaseHandlerStrategy("HBASE", "RegionServer"));
+ map.put("Krb5Kdc", new Krb5KdcHandlerStrategy("KERBEROS", "Krb5Kdc"));
+ map.put("KAdmin", new KAdminHandlerStrategy("KERBEROS", "KAdmin"));
+ map.put("SRFE", new FEHandlerStrategy("STARROCKS", "SRFE"));
+ map.put("DorisFE", new FEHandlerStrategy("DORIS", "DorisFE"));
+ map.put("DorisFEObserver", new FEObserverHandlerStrategy("DORIS", "DorisFEObserver"));
+ map.put("ZkServer", new ZkServerHandlerStrategy("ZOOKEEPER", "ZkServer"));
+ map.put("KafkaBroker", new KafkaHandlerStrategy("KAFKA", "KafkaBroker"));
+ map.put("SRBE", new BEHandlerStrategy("STARROCKS", "SRBE"));
+ map.put("DorisBE", new BEHandlerStrategy("DORIS", "DorisBE"));
+ map.put("HistoryServer", new HistoryServerHandlerStrategy("YARN", "HistoryServer"));
+
+ // TEZ Server service
+ map.put("TezServer", new TezServerHandlerStrategy("TEZ", "TezServer"));
+ //kyuubi
+ map.put("KyuubiServer", new KyuubiServerHandlerStrategy("KYUUBI", "KyuubiServer"));
+ }
+
+ public static ServiceRoleStrategy getServiceRoleHandler(String type) {
+ if (StringUtils.isBlank(type)) {
+ return null;
+ }
+ return map.get(type);
+ }
+}
diff --git a/datasophon-worker/src/main/resources/script/datasophon-env.sh b/datasophon-worker/src/main/resources/script/datasophon-env.sh
index 2f3f0a88..f957c15e 100644
--- a/datasophon-worker/src/main/resources/script/datasophon-env.sh
+++ b/datasophon-worker/src/main/resources/script/datasophon-env.sh
@@ -2,6 +2,7 @@ export JAVA_HOME=/usr/local/jdk1.8.0_333
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export JAVA_HOME CLASSPATH
+export KYUUBI_HOME=/opt/datasophon/kyuubi-1.7.3
export SPARK_HOME=/opt/datasophon/spark-3.1.3
export PYSPARK_ALLOW_INSECURE_GATEWAY=1
export HIVE_HOME=/opt/datasophon/hive-3.1.0
diff --git a/datasophon-worker/src/main/resources/templates/kyuubi-env.ftl b/datasophon-worker/src/main/resources/templates/kyuubi-env.ftl
new file mode 100644
index 00000000..670a9e1e
--- /dev/null
+++ b/datasophon-worker/src/main/resources/templates/kyuubi-env.ftl
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# - JAVA_HOME Java runtime to use. By default use "java" from PATH.
+#
+#
+# - KYUUBI_CONF_DIR Directory containing the Kyuubi configurations to use.
+# (Default: $KYUUBI_HOME/conf)
+# - KYUUBI_LOG_DIR Directory for Kyuubi server-side logs.
+# (Default: $KYUUBI_HOME/logs)
+# - KYUUBI_PID_DIR Directory stores the Kyuubi instance pid file.
+# (Default: $KYUUBI_HOME/pid)
+# - KYUUBI_MAX_LOG_FILES Maximum number of Kyuubi server logs can rotate to.
+# (Default: 5)
+# - KYUUBI_JAVA_OPTS JVM options for the Kyuubi server itself in the form "-Dx=y".
+# (Default: none).
+# - KYUUBI_CTL_JAVA_OPTS JVM options for the Kyuubi ctl itself in the form "-Dx=y".
+# (Default: none).
+# - KYUUBI_BEELINE_OPTS JVM options for the Kyuubi BeeLine in the form "-Dx=Y".
+# (Default: none)
+# - KYUUBI_NICENESS The scheduling priority for Kyuubi server.
+# (Default: 0)
+# - KYUUBI_WORK_DIR_ROOT Root directory for launching sql engine applications.
+# (Default: $KYUUBI_HOME/work)
+# - HADOOP_CONF_DIR Directory containing the Hadoop / YARN configuration to use.
+# - YARN_CONF_DIR Directory containing the YARN configuration to use.
+#
+# - SPARK_HOME Spark distribution which you would like to use in Kyuubi.
+# - SPARK_CONF_DIR Optional directory where the Spark configuration lives.
+# (Default: $SPARK_HOME/conf)
+# - FLINK_HOME Flink distribution which you would like to use in Kyuubi.
+# - FLINK_CONF_DIR Optional directory where the Flink configuration lives.
+# (Default: $FLINK_HOME/conf)
+# - FLINK_HADOOP_CLASSPATH Required Hadoop jars when you use the Kyuubi Flink engine.
+# - HIVE_HOME Hive distribution which you would like to use in Kyuubi.
+# - HIVE_CONF_DIR Optional directory where the Hive configuration lives.
+# (Default: $HIVE_HOME/conf)
+# - HIVE_HADOOP_CLASSPATH Required Hadoop jars when you use the Kyuubi Hive engine.
+#
+
+# set server jvm
+export KYUUBI_JAVA_OPTS="-Xmx${kyuubiServerHeapSize}g -XX:+UnlockDiagnosticVMOptions -XX:ParGCCardsPerStrideChunk=4096 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -XX:MaxDirectMemorySize=1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -Xloggc:./logs/kyuubi-server-gc-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=5M -XX:NewRatio=3 -XX:MetaspaceSize=512m"
+
+# set client jvm
+export KYUUBI_BEELINE_OPTS="-Xmx${kyuubiClientHeapSize}g -XX:+UnlockDiagnosticVMOptions -XX:ParGCCardsPerStrideChunk=4096 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark"
+#jdk
+export JAVA_HOME=${javaHome}
+#spark engine
+export SPARK_HOME=${sparkHome}
+
+#hadoop config
+export HADOOP_CONF_DIR=${hadoopConfDir}
+export YARN_CONF_DIR=${hadoopConfDir}
+
+# customer env
+<#list itemList as item>
+ export ${item.name}=${item.value}
+</#list>
+
+
diff --git a/datasophon-worker/src/main/resources/templates/streampark.ftl b/datasophon-worker/src/main/resources/templates/streampark.ftl
index e14ac52d..6bcd1b77 100644
--- a/datasophon-worker/src/main/resources/templates/streampark.ftl
+++ b/datasophon-worker/src/main/resources/templates/streampark.ftl
@@ -11,84 +11,89 @@ logging:
level:
root: info
+knife4j:
+ enable: true
+ basic:
+ # basic authentication, used to access swagger-ui and doc
+ enable: false
+ username: admin
+ password: streampark
+
+springdoc:
+ api-docs:
+ enabled: true
+ swagger-ui:
+ path: /swagger-ui.html
+ packages-to-scan: org.apache.streampark.console
+
spring:
- application.name: streamx
- mvc:
- pathmatch:
- matching-strategy: ant_path_matcher
- devtools:
- restart:
- enabled: false
+ application.name: StreamPark
+ devtools.restart.enabled: false
+ mvc.pathmatch.matching-strategy: ant_path_matcher
servlet:
multipart:
enabled: true
max-file-size: 500MB
max-request-size: 500MB
- datasource:
- dynamic:
- # 是否开启 SQL日志输出,生产环境建议关闭,有性能损耗
- p6spy: false
- hikari:
- connection-timeout: 30000
- max-lifetime: 1800000
- max-pool-size: 15
- min-idle: 5
- connection-test-query: select 1
- pool-name: HikariCP-DS-POOL
- # 配置默认数据源
- primary: primary
- datasource:
- # 数据源-1,名称为 primary
- primary:
- username: ${username}
- password: ${password}
- driver-class-name: com.mysql.cj.jdbc.Driver
- url: ${databaseUrl}
aop.proxy-target-class: true
messages.encoding: utf-8
jackson:
date-format: yyyy-MM-dd HH:mm:ss
time-zone: GMT+8
+ deserialization:
+ fail-on-unknown-properties: false
main:
allow-circular-references: true
banner-mode: off
-
+ mvc:
+ converters:
+ preferred-json-mapper: jackson
+ datasource:
+ username: ${username}
+ password: ${password}
+ driver-class-name: com.mysql.cj.jdbc.Driver
+ url: ${databaseUrl}
management:
endpoints:
web:
exposure:
- include: [ 'httptrace', 'metrics' ]
+ include: [ 'health', 'httptrace', 'metrics' ]
+ endpoint:
+ health:
+ enabled: true
+ show-details: always
+ probes:
+ enabled: true
+ health:
+ ldap:
+ enabled: false
-#mybatis plus 设置
-mybatis-plus:
- type-aliases-package: com.streamxhub.streamx.console.*.entity
- mapper-locations: classpath:mapper/*/*.xml
- configuration:
- jdbc-type-for-null: null
- global-config:
- db-config:
- id-type: auto
- # 关闭 mybatis-plus的 banner
- banner: false
+streampark:
+ proxy:
+ # knox process address https://cdpsit02.example.cn:8443/gateway/cdp-proxy/yarn
+ yarn-url:
+ # lark alert proxy,default https://open.feishu.cn
+ lark-url:
+ yarn:
+ # default sample, or kerberos
+ http-auth: sample
-streamx:
# HADOOP_USER_NAME
hadoop-user-name: ${hadoopUserName}
- # 本地的工作空间,用于存放项目源码,构建的目录等.
+ # local workspace, used to store source code and build dir etc.
workspace:
- local: /opt/datasophon/streampark-1.2.3/workspace
- remote: hdfs://nameservice1/streamx # support hdfs:///streamx/ 、 /streamx 、hdfs://host:ip/streamx/
+ local: ${workspaceLocal}
+ remote: ${workspaceRemote} # support hdfs:///streamx/ 、 /streamx 、hdfs://host:ip/streamx/
- # remote docker register namespace for streamx
+ # remote docker register namespace for streampark
docker:
- register:
- image-namespace: streamx
# instantiating DockerHttpClient
http-client:
max-connections: 10000
connection-timeout-sec: 10000
response-timeout-sec: 12000
+ docker-host: ""
# flink-k8s tracking configuration
flink-k8s:
@@ -109,25 +114,20 @@ streamx:
exec-cron: 0 0 0/6 * * ?
shiro:
- # token有效期,单位秒
+ # token timeout, unit second
jwtTimeOut: 86400
- # 后端免认证接口 url
+ # backend authentication-free resources url
anonUrl: >
- /passport/**,
- /systemName,
- /user/check/**,
- /websocket/**,
- /metrics/**,
- /index.html,
- /assets/**,
- /css/**,
- /fonts/**,
- /img/**,
- /js/**,
- /loading/**,
- /*.js,
- /*.png,
- /*.jpg,
- /*.less,
- /
+ldap:
+ # Is ldap enabled? If so, please modify the urls
+ enable: false
+ ## AD server IP, default port 389
+ urls: ldap://99.99.99.99:389
+ ## Login Account
+ base-dn: dc=streampark,dc=com
+ username: cn=Manager,dc=streampark,dc=com
+ password: streampark
+ user:
+ identity-attribute: uid
+ email-attribute: mail
diff --git a/docs/zh/ApacheKyuubi.md b/docs/zh/ApacheKyuubi.md
new file mode 100644
index 00000000..85bfe373
--- /dev/null
+++ b/docs/zh/ApacheKyuubi.md
@@ -0,0 +1,68 @@
+# ApacheKyuubi
+
+## 概述
+`
+ ApacheKyuubi是一个分布式多租户网关,支持Spark,Flink,Hive等计算引擎,
+依赖Kyuubi我们可以更方便的对数据湖组件进行集成.
+`
+## 连接器说明
+`
+目前默认对spark做了集成,如果需要对其他引擎或者数据湖做集成可以参考:https://kyuubi.readthedocs.io/en/v1.7.3/connector/index.html
+`
+## 服务认证
+`
+ ApacheKyuubi对于认证支持多种方式,默认对Kerberos做了集成,只需要在安装时打开相关选项即可,如果
+需要集成其他认证模式可以参考:https://kyuubi.readthedocs.io/en/v1.7.3/security/index.html
+`
+## 权限集成
+`
+在使用Spark引擎时我们可以借助ApacheKyuubi提供的RangerAuth插件使用现有的hive权限策略实现统一的权限
+管理,目前在集成时没有对这部分做集成(集成的方式是SparkExtension需要改动Spark的相关配置),需要使用权限
+可以参考:https://kyuubi.readthedocs.io/en/v1.7.3/security/authorization/spark/index.html
+`
+## 简单使用说明
+
+### 这里以Spark引擎为示例:
+
+#### HA连接:
+```
+beeline -u 'jdbc:hive2://zkhost:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=kyuubi_ns;principal=kyuubi/_HOST@HADOOP.COM'
+```
+#### 指定服务器IP连接
+```
+beeline -u 'jdbc:hive2://serverhost:10009/;principal=kyuubi/_HOST@HADOOP.COM' -n userName
+```
+##### 连接说明
+```
+我们在集成时默认是以HA的方式集成的,ApacheKyuubi高可用需要借助Zookeeper,因此这里的zkhost:2181是指我们的zk集群信息,serviceDiscoveryMode说明
+使用zk做服务发现,zooKeeperNamespace是zk的path信息,principal是在开启了Kerberos认证时需要指定的用户身份信息(注意这里的票据信息是固定的即Server端配置的信息,
+_HOST是通配)
+```
+
+#### Sql查询
+```
+#查询方面与Hive beeline 没有区别两者等价,语法方面可以参考对应的Spark版本,如下执行show tables语句(这里删除打印的其他日志)
+0: jdbc:hive2://192.168.163.127:2181/> show tables;
++-----------+------------+--------------+
+| database | tableName | isTemporary |
++-----------+------------+--------------+
+| default | my_table3 | false |
+| default | my_table4 | false |
++-----------+------------+--------------+
+2 rows selected (3.875 seconds)
+
+#select查询
+0: jdbc:hive2://192.168.163.127:2181/> select 1 as col1;
++-------+
+| col1 |
++-------+
+| 1 |
++-------+
+```
+
+## 其他
+```
+这里只列举简单的说明与基本使用方式,ApacheKyuubi的功能远不止于此,更详细的操作请参考官方文档:https://kyuubi.readthedocs.io/en/v1.7.3
+```
+
+
diff --git "a/docs/zh/Kerberos\345\242\236\345\212\240\347\224\250\346\210\267.md" "b/docs/zh/Kerberos\345\242\236\345\212\240\347\224\250\346\210\267.md"
new file mode 100644
index 00000000..24edfea4
--- /dev/null
+++ "b/docs/zh/Kerberos\345\242\236\345\212\240\347\224\250\346\210\267.md"
@@ -0,0 +1,128 @@
+# Kerberos增加用户
+
+1. 添加用户
+
+```shell
+#添加lzy系统用户
+[root@master bin]# adduser lzy
+#登录kadmin添加lzy/master@HADOOP.COM用户,并导出keytab
+[root@master bin]# kadmin.local
+Authenticating as principal kylin/admin@HADOOP.COM with password.
+kadmin.local: add
+add_policy add_principal addpol addprinc
+kadmin.local: addprinc lzy/master@HADOOP.COM
+WARNING: no policy specified for lzy/master@HADOOP.COM; defaulting to no policy
+Enter password for principal "lzy/master@HADOOP.COM":
+Re-enter password for principal "lzy/master@HADOOP.COM":
+Principal "lzy/master@HADOOP.COM" created.
+kadmin.local: ktadd -k /root/lzy.keytab -norandkey lzy/master@HADOOP.COM
+Entry for principal lzy/master@HADOOP.COM with kvno 1, encryption type aes256-cts-hmac-sha1-96 added to keytab WRFILE:/root/lzy.keytab.
+Entry for principal lzy/master@HADOOP.COM with kvno 1, encryption type aes128-cts-hmac-sha1-96 added to keytab WRFILE:/root/lzy.keytab.
+Entry for principal lzy/master@HADOOP.COM with kvno 1, encryption type des3-cbc-sha1 added to keytab WRFILE:/root/lzy.keytab.
+Entry for principal lzy/master@HADOOP.COM with kvno 1, encryption type arcfour-hmac added to keytab WRFILE:/root/lzy.keytab.
+Entry for principal lzy/master@HADOOP.COM with kvno 1, encryption type des-hmac-sha1 added to keytab WRFILE:/root/lzy.keytab.
+Entry for principal lzy/master@HADOOP.COM with kvno 1, encryption type des-cbc-md5 added to keytab WRFILE:/root/lzy.keytab.
+```
+
+2. hdfs授权
+
+需要启用访问控制列表dfs.namenode.acls.enabled
+
+```
+#查看hdfs的权限
+[root@master bin]# kinit -kt /etc/security/keytabs/hdfs.keytab hdfs/master@HADOOP.COM
+[root@master bin]# hadoop fs -getfacl /
+# file: /
+# owner: hdfs
+# group: supergroup
+user::rwx
+group::r-x
+mask::rwx
+other::r-x
+#设置权限,-R是递归授权,rwx是权限,最后设置路径
+[root@master bin]# hadoop fs -setfacl -R -m user:lzy:rwx /
+```
+
+
+
+3. hive和impala
+
+用hive管理员登录
+
+```shell
+[root@master ~]# kinit -kt /etc/security/keytabs/hive.keytab hive/master@HADOOP.COM
+[root@master ~]# hive
+hive> show roles;
+FAILED: SemanticException The current builtin authorization in Hive is incomplete and disabled.
+hive> set hive.security.authorization.task.factory = org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl;
+OK
+admin
+public
+Time taken: 1.36 seconds, Fetched: 2 row(s)
+hive> grant role admin to user lzy;
+hive> exit;
+
+#验证授权是否成功
+[root@master ~]# kinit -kt /root/lzy.keytab lzy/master@HADOOP.COM
+[root@master bin]# hive
+hive> show databases;
+OK
+default
+Time taken: 0.685 seconds, Fetched: 1 row(s)
+```
+
+4. hbase
+
+```shell
+ [root@master ~]# kinit -kt /etc/security/keytabs/hbase.keytab hbase/master@HADOOP.COM
+[root@master ~]# hbase shell
+hbase(main):001:0> grant 'lzy','RWXCA'
+0 row(s) in 0.6650 seconds
+
+hbase(main):002:0> exit
+[root@master ~]# kinit -kt /root/lzy.keytab lzy/master@HADOOP.COM
+[root@master ~]# hbase shell
+hbase(main):001:0> list
+TABLE
+TEXT
+1 row(s) in 0.1890 seconds
+
+=> ["TEXT"]
+hbase(main):002:0> create 'Student','StuInfo','Grades'
+0 row(s) in 4.7490 seconds
+
+=> Hbase::Table - Student
+hbase(main):003:0> list
+TABLE
+Student
+TEXT
+2 row(s) in 0.0210 seconds
+
+=> ["Student", "TEXT"]
+hbase(main):004:0> exit
+```
+
+5. kafka
+
+```shell
+#增加kylin用户
+./kafka-configs.sh --zookeeper work02:2181/kafka --alter --add-config 'SCRAM-SHA-256=[password=123456],SCRAM-SHA-512=[password=123456]' --entity-type users --entity-name lzy
+#修改kafka_client_jaas.conf
+KafkaClient {
+com.sun.security.auth.module.Krb5LoginModule required
+useTicketCache=true;
+};
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+keyTab="D:\company\kerberos\master\keytabs\lzy.keytab"
+storeKey=true
+useTicketCache=false
+principal="lzy/master@HADOOP.COM"
+serviceName=kafka;
+};
+```
+
+
+
diff --git "a/docs/zh/datasophon\351\203\250\347\275\262\350\256\260\345\275\225.pdf" "b/docs/zh/datasophon\351\203\250\347\275\262\350\256\260\345\275\225.pdf"
new file mode 100644
index 00000000..2ea0f727
Binary files /dev/null and "b/docs/zh/datasophon\351\203\250\347\275\262\350\256\260\345\275\225.pdf" differ
diff --git "a/docs/zh/datasophon\351\233\206\346\210\220Minio.md" "b/docs/zh/datasophon\351\233\206\346\210\220Minio.md"
new file mode 100644
index 00000000..ebb03f82
--- /dev/null
+++ "b/docs/zh/datasophon\351\233\206\346\210\220Minio.md"
@@ -0,0 +1,248 @@
+### 1、构建minio压缩包
+下载minio安装包:[https://dl.min.io/server/minio/release/linux-amd64/minio](https://dl.min.io/server/minio/release/linux-amd64/minio)
+```shell
+mkdir /opt/soft/tmp/minio-8.4.3
+cd /opt/soft/tmp/minio-8.4.3
+# 将Minio安装包放到当前目录
+mkdir bin
+mkdir etc
+touch ./bin/start.sh
+touch ./bin/stop.sh
+touch ./bin/status.sh
+```
+创建好的编排目录格式如下:
+```shell
+-bin
+ -start.sh
+ -stop.sh
+ -status.sh
+-etc
+-minio
+```
+编写 stop.sh 和 status.sh
+```shell
+#!/bin/bash
+
+echo "Stopping minio"
+
+pid=`ps -ef | grep 'minio server' | grep -v grep | awk '{print $2}'`
+
+if [ -n "$pid" ]
+
+then
+
+kill -9 $pid
+
+fi
+
+echo "Stop Success!"
+```
+```shell
+#!/bin/bash
+
+echo "Checking Minio Status"
+
+# 使用ps命令查找Minio进程
+pid=$(ps -ef | grep 'minio server' | grep -v grep | awk '{print $2}')
+
+if [ -n "$pid" ]; then
+ echo "Minio is running with PID $pid"
+ exit 0
+else
+ echo "Minio is not running"
+ exit 1
+fi
+```
+制作minio安装包
+```shell
+cd /opt/soft/tmp
+tar czf minio-8.4.3.tar.gz minio-8.4.3
+md5sum minio-8.4.3.tar.gz
+md5sum minio-8.4.3.tar.gz | awk '{print $1}' > minio-8.4.3.tar.gz.md5
+```
+将安装包拷贝到各worker节点对应目录
+```shell
+cp ./minio-8.4.3.tar.gz ./minio-8.4.3.tar.gz.md5 /opt/datasophon/DDP/packages/
+```
+### 2、创建minio配置文件
+```shell
+cd /opt/apps/datasophon/datasophon-manager-1.1.2/conf/meta/DDP-1.1.2
+mkdir MINIO
+cd MINIO
+touch service_ddl.json
+```
+```shell
+{
+ "name": "MINIO",
+ "label": "MINIO",
+ "description": "s3对象存储",
+ "version": "8.4.3",
+ "sortNum": 22,
+ "dependencies": [],
+ "packageName": "minio-8.4.3.tar.gz",
+ "decompressPackageName": "minio-8.4.3",
+ "roles": [
+ {
+ "name": "MinioService",
+ "label": "MinioService",
+ "roleType": "master",
+ "cardinality": "1+",
+ "sortNum": 1,
+ "logFile": "minio.log",
+ "jmxPort": 11111,
+ "startRunner": {
+ "timeout": "60",
+ "program": "bin/start.sh",
+ "args": []
+ },
+ "stopRunner": {
+ "timeout": "60",
+ "program": "bin/stop.sh",
+ "args": []
+ },
+ "statusRunner": {
+ "timeout": "60",
+ "program": "bin/status.sh",
+ "args": []
+ },
+ "externalLink": {
+ "name": "minio Ui",
+ "label": "minio Ui",
+ "url": "http://${host}:${consolePort}"
+ }
+ }
+ ],
+ "configWriter": {
+ "generators": [
+ {
+ "filename": "start.sh",
+ "configFormat": "custom",
+ "outputDirectory": "bin",
+ "templateName": "minio-run.flt",
+ "includeParams": [
+ "MINIO_ACCESS_KEY",
+ "MINIO_SECRET_KEY",
+ "dataPaths",
+ "apiPort",
+ "consolePort"
+ ]
+ }
+ ]
+ },
+ "parameters": [
+ {
+ "name": "MINIO_ACCESS_KEY",
+ "label": "用户名",
+ "description": "用户名,长度最小是5个字符",
+ "required": true,
+ "configType": "map",
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "minio"
+ },
+ {
+ "name": "MINIO_SECRET_KEY",
+ "label": "密码",
+ "description": "密码不能设置过于简单,不然minio会启动失败,长度最小是8个字符",
+ "required": true,
+ "configType": "map",
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "Jd2019@123"
+ },
+ {
+ "name": "dataPaths",
+ "label": "集群配置文件目录",
+ "description": "集群配置文件目录,必须根据指定格式将各部署节点配置上,按空格分隔",
+ "configType": "map",
+ "required": true,
+ "separator": " ",
+ "type": "multiple",
+ "value": [
+ "http://{host}:{apiPort}/data/minio/data"
+ ],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "apiPort",
+ "label": "api访问端口",
+ "description": "api访问端口",
+ "required": true,
+ "configType": "map",
+ "type": "input",
+ "value": "9000",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "9000"
+ },
+ {
+ "name": "consolePort",
+ "label": "UI访问端口",
+ "description": "UI访问端口",
+ "required": true,
+ "configType": "map",
+ "type": "input",
+ "value": "9001",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "9001"
+ }
+ ]
+}
+```
+各worker节点创建minio-run.flt文件
+```shell
+cd /opt/datasophon/datasophon-worker/conf/templates
+touch minio-run.flt
+```
+```shell
+#!/bin/bash
+
+# 设置MinIO的配置参数
+export MINIO_ROOT_USER=${MINIO_ACCESS_KEY}
+export MINIO_ROOT_PASSWORD=${MINIO_SECRET_KEY}
+
+export MINIO_PROMETHEUS_AUTH_TYPE=public #加入这行环境变量,“public”表示Prometheus访问minio集群可以不通过身份验证
+
+/opt/datasophon/minio/minio server --config-dir /opt/datasophon/minio/etc \
+ --address "0.0.0.0:${apiPort}" --console-address ":${consolePort}" \
+ ${dataPaths} > /opt/datasophon/minio/minio.log 2>&1 &
+```
+### 3、重启datasophon
+各节点worker重启
+```shell
+sh /opt/datasophon/datasophon-worker/bin/datasophon-worker.sh restart worker
+```
+主节点重启api
+```shell
+sh /opt/apps/datasophon/datasophon-manager-1.1.2/bin/datasophon-api.sh restart api
+```
+此时可以看到mysql元数据库中 t_ddh_frame_service 和 t_ddh_frame_service_role 两个表已经添加了minio的元数据。
+### 4、安装
+安装配置样例
+
+![image](https://github.com/datavane/datasophon/assets/62798940/b7ca4c46-fcb8-4c8b-b195-e2e3d32f00c2)
+
+注意配置文件目录data文件夹必须是空的!!!
+### 5、监控
+```shell
+vim /opt/datasophon/prometheus/prometheus.yml
+# 新增配置
+ - job_name: minio_job
+ metrics_path: /minio/prometheus/metrics
+ scheme: http
+ static_configs:
+ - targets: ['192.168.1.54:9000','192.168.1.55:9000','192.168.1.56:9000']
+```
+重启prometheus
+### 6、grafana
+导入模板 [https://grafana.com/grafana/dashboards/12063](https://grafana.com/grafana/dashboards/12063)
+datasophon mysql表 t_ddh_cluster_service_dashboard 新增图标链接
+
+![image](https://github.com/datavane/datasophon/assets/62798940/95067756-41b4-428d-aeb6-b4923411c314)
diff --git "a/docs/zh/datasophon\351\233\206\346\210\220alluxio.md" "b/docs/zh/datasophon\351\233\206\346\210\220alluxio.md"
new file mode 100644
index 00000000..89bea444
--- /dev/null
+++ "b/docs/zh/datasophon\351\233\206\346\210\220alluxio.md"
@@ -0,0 +1,497 @@
+### 1、构建压缩包
+官方下载安装包 alluxio-2.9.3-bin.tar.gz
+```shell
+tar -zxvf alluxio-2.9.3-bin.tar.gz
+cd alluxio-2.9.3
+vim control_alluxio.sh
+cd ..
+tar czf alluxio-2.9.3.tar.gz alluxio-2.9.3
+md5sum alluxio-2.9.3.tar.gz
+md5sum alluxio-2.9.3.tar.gz | awk '{print $1}' > alluxio-2.9.3.tar.gz.md5
+cp ./alluxio-2.9.3.tar.gz ./alluxio-2.9.3.tar.gz.md5 /opt/datasophon/DDP/packages
+```
+control_alluxio.sh:
+```shell
+#!/bin/bash
+
+operation=$1
+node_type=$2
+
+alluxio_start="./bin/alluxio-start.sh"
+alluxio_stop="./bin/alluxio-stop.sh"
+
+check_process() {
+ if ps -ef | grep -v grep | grep -q "$1"; then
+ return 0 # Process exists
+ else
+ return 1 # Process doesn't exist
+ fi
+}
+
+start_master() {
+ if ! check_process "AlluxioMaster"; then
+ $alluxio_start master
+ fi
+ if ! check_process "AlluxioJobMaster"; then
+ $alluxio_start job_master
+ fi
+ if ! check_process "AlluxioProxy"; then
+ $alluxio_start proxy
+ fi
+}
+
+start_worker() {
+ if ! check_process "AlluxioWorker"; then
+ $alluxio_start worker
+ fi
+ if ! check_process "AlluxioJobWorker"; then
+ $alluxio_start job_worker
+ fi
+ if ! check_process "AlluxioProxy"; then
+ $alluxio_start proxy
+ fi
+}
+
+stop_master() {
+ if check_process "AlluxioProxy"; then
+ $alluxio_stop proxy
+ fi
+ if check_process "AlluxioJobMaster"; then
+ $alluxio_stop job_master
+ fi
+ if check_process "AlluxioMaster"; then
+ $alluxio_stop master
+ fi
+}
+
+stop_worker() {
+ if check_process "AlluxioProxy"; then
+ $alluxio_stop proxy
+ fi
+ if check_process "AlluxioJobWorker"; then
+ $alluxio_stop job_worker
+ fi
+ if check_process "AlluxioWorker"; then
+ $alluxio_stop worker
+ fi
+}
+
+if [ "$operation" == "start" ]; then
+ case "$node_type" in
+ "master")
+ start_master
+ ;;
+ "worker")
+ start_worker
+ ;;
+ *)
+ echo "Invalid node type. Please use 'master' or 'worker'."
+ ;;
+ esac
+elif [ "$operation" == "stop" ]; then
+ case "$node_type" in
+ "master")
+ stop_master
+ ;;
+ "worker")
+ stop_worker
+ ;;
+ *)
+ echo "Invalid node type. Please use 'master' or 'worker'."
+ ;;
+ esac
+elif [ "$operation" == "status" ]; then
+ case "$node_type" in
+ "master")
+ if check_process "AlluxioMaster"; then
+ exit 0
+ else
+ exit 1
+ fi
+ ;;
+ "worker")
+ if check_process "AlluxioWorker"; then
+ exit 0
+ else
+ exit 1
+ fi
+ ;;
+ *)
+ echo "Invalid node type. Please use 'master' or 'worker'."
+ ;;
+ esac
+else
+ echo "Invalid operation. Please use 'start', 'stop', or 'status'."
+fi
+
+```
+### 2、配置元数据文件
+```shell
+cd /opt/apps/datasophon-manager-1.2.0/conf/meta/DDP-1.2.0
+mkdir ALLUXIO
+touch service_ddl.json
+touch properties_value.ftl
+```
+将下面两个文件放进去
+
+service_ddl.json:
+```shell
+{
+ "name": "ALLUXIO",
+ "label": "ALLUXIO",
+ "description": "分布式内存文件系统",
+ "version": "2.9.3",
+ "sortNum": 30,
+ "dependencies": [
+ "ZOOKEEPER"
+ ],
+ "packageName": "alluxio-2.9.3.tar.gz",
+ "decompressPackageName": "alluxio-2.9.3",
+ "roles": [
+ {
+ "name": "AlluxioMaster",
+ "label": "AlluxioMaster",
+ "roleType": "master",
+ "runAs": {},
+ "cardinality": "1+",
+ "sortNum": 2,
+ "logFile": "logs/master.log",
+ "jmxPort": "",
+ "startRunner": {
+ "timeout": "600",
+ "program": "control_alluxio.sh",
+ "args": [
+ "start",
+ "master"
+ ]
+ },
+ "stopRunner": {
+ "timeout": "600",
+ "program": "control_alluxio.sh",
+ "args": [
+ "stop",
+ "master"
+ ]
+ },
+ "statusRunner": {
+ "timeout": "60",
+ "program": "control_alluxio.sh",
+ "args": [
+ "status",
+ "master"
+ ]
+ },
+ "externalLink": {
+ "name": "master Ui",
+ "label": "master Ui",
+ "url": "http://${host}:19999"
+ }
+ },
+ {
+ "name": "AlluxioWorker",
+ "label": "AlluxioWorker",
+ "roleType": "worker",
+ "runAs": {},
+ "cardinality": "1+",
+ "sortNum": 1,
+ "logFile": "logs/worker.log",
+ "jmxPort": "",
+ "startRunner": {
+ "timeout": "60",
+ "program": "control_alluxio.sh",
+ "args": [
+ "start",
+ "worker"
+ ]
+ },
+ "stopRunner": {
+ "timeout": "600",
+ "program": "control_alluxio.sh",
+ "args": [
+ "stop",
+ "worker"
+ ]
+ },
+ "statusRunner": {
+ "timeout": "60",
+ "program": "control_alluxio.sh",
+ "args": [
+ "status",
+ "worker"
+ ]
+ }
+ }
+ ],
+ "configWriter": {
+ "generators": [
+ {
+ "filename": "alluxio-site.properties",
+ "configFormat": "properties",
+ "outputDirectory": "conf",
+ "includeParams": [
+ "alluxio.master.mount.table.root.ufs",
+ "alluxio.underfs.hdfs.configuration",
+ "alluxio.master.embedded.journal.addresses",
+ "alluxio.zookeeper.enabled",
+ "alluxio.zookeeper.address",
+ "alluxio.master.journal.type",
+ "alluxio.master.journal.folder",
+ "alluxio.worker.block.heartbeat.timeout.ms",
+ "alluxio.zookeeper.session.timeout",
+ "custom.common.properties"
+ ]
+ },
+ {
+ "filename": "masters",
+ "configFormat": "custom",
+ "outputDirectory": "conf",
+ "templateName": "properties_value.ftl",
+ "includeParams": [
+ "masters"
+ ]
+ },
+ {
+ "filename": "workers",
+ "configFormat": "custom",
+ "outputDirectory": "conf",
+ "templateName": "properties_value.ftl",
+ "includeParams": [
+ "workers"
+ ]
+ }
+ ]
+ },
+ "parameters": [
+ {
+ "name": "alluxio.master.mount.table.root.ufs",
+ "label": "挂载到Alluxio根目录的底层存储URI",
+ "description": "挂载到Alluxio根目录的底层存储URI",
+ "required": true,
+ "type": "input",
+ "value": "${fs.defaultFS}/alluxio",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "${fs.defaultFS}/alluxio"
+ },
+ {
+ "name": "alluxio.underfs.hdfs.configuration",
+ "label": "hdfs配置文件路径",
+ "description": "hdfs配置文件路径",
+ "required": true,
+ "type": "input",
+ "value": "${HADOOP_HOME}/etc/hadoop/core-site.xml:${HADOOP_HOME}/etc/hadoop/hdfs-site.xml",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "${HADOOP_HOME}/etc/hadoop/core-site.xml:${HADOOP_HOME}/etc/hadoop/hdfs-site.xml"
+ },
+ {
+ "name": "alluxio.master.embedded.journal.addresses",
+ "label": "参加leading master选举的master节点集",
+ "description": "参加Alluxio leading master选举的master节点集",
+ "required": true,
+ "type": "input",
+ "value": "${host1}:19200,${host2}:19200,${host3}:19200",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "alluxio.zookeeper.enabled",
+ "label": "启用HA模式",
+ "description": "启用HA模式",
+ "required": true,
+ "type": "switch",
+ "value": true,
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": true
+ },
+ {
+ "name": "alluxio.zookeeper.address",
+ "label": "zookeeper地址",
+ "description": "zookeeper地址",
+ "required": true,
+ "type": "input",
+ "value": "${zkUrls}",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "${zkUrls}"
+ },
+ {
+ "name": "alluxio.master.journal.type",
+ "label": "",
+ "description": "",
+ "required": true,
+ "type": "input",
+ "value": "UFS",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "UFS"
+ },
+ {
+ "name": "alluxio.master.journal.folder",
+ "label": "共享日志位置的URI",
+ "description": "共享日志位置的URI",
+ "required": true,
+ "type": "input",
+ "value": "${fs.defaultFS}/alluxio/journal/",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "${fs.defaultFS}/alluxio/journal/"
+ },
+ {
+ "name": "alluxio.worker.block.heartbeat.timeout.ms",
+ "label": "Zookeeper服务器的最小/最大session timeout",
+ "description": "Zookeeper服务器的最小/最大session timeout",
+ "required": true,
+ "type": "input",
+ "value": "300000",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "300000"
+ },
+ {
+ "name": "alluxio.zookeeper.session.timeout",
+ "label": "zookeeper连接超时时间",
+ "description": "zookeeper连接超时时间",
+ "required": true,
+ "type": "input",
+ "value": "120s",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "120s"
+ },
+ {
+ "name": "custom.common.properties",
+ "label": "自定义配置common.properties",
+ "description": "自定义配置",
+ "configType": "custom",
+ "required": false,
+ "type": "multipleWithKey",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "masters",
+ "label": "masters",
+ "description": "masters机器的IP",
+ "required": true,
+ "separator":"\n",
+ "type": "multiple",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "workers",
+ "label": "workers",
+ "description": "workers机器的IP",
+ "required": true,
+ "separator":"\n",
+ "type": "multiple",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ }
+ ]
+}
+```
+properties_value.ftl:
+```shell
+<#list itemList as item>
+${item.value}
+</#list>
+```
+### 3、新增worker源码Handler
+修改 com.datasophon.worker.handler.ConfigureServiceHandler
+新增:
+```java
+if ("AlluxioMaster".equals(serviceRoleName) && "alluxio-site.properties".equals(generators.getFilename())) {
+ ServiceConfig serviceConfig = new ServiceConfig();
+ serviceConfig.setName("alluxio.master.hostname");
+ serviceConfig.setValue(hostName);
+ customConfList.add(serviceConfig);
+}
+if ("AlluxioWorker".equals(serviceRoleName) && "alluxio-site.properties".equals(generators.getFilename())) {
+ File alluxioFile =
+ new File(Constants.INSTALL_PATH + File.separator + decompressPackageName, "conf/alluxio-site.properties");
+ if (alluxioFile.exists()) {
+ continue;
+ }
+}
+```
+
+![image](https://github.com/datavane/datasophon/assets/62798940/475ae77d-8865-457c-9699-dd4bff5e46f2)
+
+
+修改 com.datasophon.worker.strategy.ServiceRoleStrategyContext:
+```shell
+map.put("AlluxioMaster", new AlluxioHandlerStrategy("ALLUXIO", "AlluxioMaster"));
+```
+
+创建:com.datasophon.worker.strategy.AlluxioHandlerStrategy
+```java
+package com.datasophon.worker.strategy;
+
+import com.datasophon.common.Constants;
+import com.datasophon.common.command.ServiceRoleOperateCommand;
+import com.datasophon.common.enums.CommandType;
+import com.datasophon.common.utils.ExecResult;
+import com.datasophon.common.utils.ShellUtils;
+import com.datasophon.worker.handler.ServiceHandler;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+
+public class AlluxioHandlerStrategy extends AbstractHandlerStrategy implements ServiceRoleStrategy {
+
+ public AlluxioHandlerStrategy(String serviceName, String serviceRoleName) {
+ super(serviceName, serviceRoleName);
+ }
+
+ @Override
+ public ExecResult handler(ServiceRoleOperateCommand command) throws SQLException, ClassNotFoundException {
+ ServiceHandler serviceHandler = new ServiceHandler(command.getServiceName(), command.getServiceRoleName());
+ String workPath = Constants.INSTALL_PATH + Constants.SLASH + command.getDecompressPackageName();
+
+ if (command.getCommandType().equals(CommandType.INSTALL_SERVICE)) {
+ ArrayList<String> commands = new ArrayList<>();
+
+ logger.info("start format master");
+ commands.add(workPath + "/bin/alluxio");
+ commands.add("format");
+ ShellUtils.execWithStatus(workPath, commands, 300L, logger);
+ logger.info("alluxio master format success");
+
+ commands.clear();
+ commands.add(workPath + "/alluxio/bin/alluxio-start.sh");
+ commands.add("all");
+ ExecResult execResult = ShellUtils.execWithStatus(workPath, commands, 300L, logger);
+ if (execResult.getExecResult()) {
+ logger.info("alluxio start all success");
+ }
+ }
+
+ ExecResult startResult = serviceHandler.start(command.getStartRunner(), command.getStatusRunner(),
+ command.getDecompressPackageName(), command.getRunAs());
+ return startResult;
+ }
+}
+
+```
+### 4、重启
+各节点worker重启
+```shell
+sh /opt/datasophon/datasophon-worker/bin/datasophon-worker.sh restart worker debug
+```
+主节点重启api
+```shell
+sh /opt/apps/datasophon-manager-1.2.0/bin/datasophon-api.sh restart api debug
+```
+### 5、配置样例
+
+![image](https://github.com/datavane/datasophon/assets/62798940/bd626fec-c581-4c22-8f36-b582afbb7ea4)
diff --git "a/docs/zh/datasophon\351\233\206\346\210\220clickhouse.md" "b/docs/zh/datasophon\351\233\206\346\210\220clickhouse.md"
new file mode 100644
index 00000000..f19c4bae
--- /dev/null
+++ "b/docs/zh/datasophon\351\233\206\346\210\220clickhouse.md"
@@ -0,0 +1,1859 @@
+### 1、构建压缩包
+官网下载安装包
+
+![image](https://github.com/datavane/datasophon/assets/62798940/8e84d696-5854-41c2-9c9d-9574a6062d4a)
+
+各安装包解压到同一个文件夹中,同时新增bin目录,bin目录中放置status.sh
+```shell
+#!/bin/bash
+
+status_output=$(/etc/init.d/clickhouse-server status)
+
+if echo "$status_output" | grep -q "is running"; then
+ exit 0
+else
+ exit 1
+fi
+```
+压缩部署到DDP
+### 2、worker新增ck strategy
+```java
+map.put("ClickHouse", new ClickHouseHandlerStrategy("CLICKHOUSE", "ClickHouse"));
+```
+```java
+package com.datasophon.worker.strategy;
+
+import com.datasophon.common.Constants;
+import com.datasophon.common.command.ServiceRoleOperateCommand;
+import com.datasophon.common.enums.CommandType;
+import com.datasophon.common.utils.ExecResult;
+import com.datasophon.common.utils.ShellUtils;
+import com.datasophon.worker.handler.ServiceHandler;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+
+public class ClickHouseHandlerStrategy extends AbstractHandlerStrategy implements ServiceRoleStrategy {
+
+ public ClickHouseHandlerStrategy(String serviceName, String serviceRoleName) {
+ super(serviceName, serviceRoleName);
+ }
+
+ @Override
+ public ExecResult handler(ServiceRoleOperateCommand command) throws SQLException, ClassNotFoundException {
+ ServiceHandler serviceHandler = new ServiceHandler(command.getServiceName(), command.getServiceRoleName());
+ String workPath = Constants.INSTALL_PATH + Constants.SLASH + command.getDecompressPackageName();
+ if (command.getCommandType().equals(CommandType.INSTALL_SERVICE)) {
+ ArrayList<String> commands = new ArrayList<>();
+
+ logger.info("/clickhouse-common-static-23.9.1.1854/install/doinst.sh");
+ commands.add(workPath + "/clickhouse-common-static-23.9.1.1854/install/doinst.sh");
+ ShellUtils.execWithStatus(workPath, commands, 300L, logger);
+ logger.info("clickhouse common static install success");
+
+ logger.info("/clickhouse-common-static-dbg-23.9.1.1854/install/doinst.sh");
+ commands.clear();
+ commands.add(workPath + "/clickhouse-common-static-dbg-23.9.1.1854/install/doinst.sh");
+ ShellUtils.execWithStatus(workPath, commands, 300L, logger);
+ logger.info("clickhouse common static dbg install success");
+
+ logger.info("/clickhouse-server-23.9.1.1854/install/doinst.sh configure");
+ commands.clear();
+ commands.add(workPath + "/clickhouse-server-23.9.1.1854/install/doinst.sh");
+ commands.add("configure");
+ ShellUtils.execWithStatus(workPath, commands, 300L, logger);
+
+ ShellUtils.exceShell("rm -rf /etc/clickhouse-server/config.xml");
+ ShellUtils.exceShell("rm -rf /etc/clickhouse-server/users.xml");
+ ShellUtils.exceShell("cp " + workPath + "/etc/config.xml /etc/clickhouse-server");
+ ShellUtils.exceShell("cp " + workPath + "/etc/users.xml /etc/clickhouse-server");
+ ShellUtils.exceShell("chown clickhouse:clickhouse /etc/clickhouse-server/config.xml /etc/clickhouse-server/users.xml");
+ logger.info("clickhouse server install success");
+
+ logger.info("/clickhouse-client-23.9.1.1854/install/doinst.sh");
+ commands.clear();
+ commands.add(workPath + "/clickhouse-client-23.9.1.1854/install/doinst.sh");
+ ShellUtils.execWithStatus(workPath, commands, 300L, logger);
+ logger.info("clickhouse client install success");
+
+ commands.clear();
+ commands.add("sudo");
+ commands.add("/etc/init.d/clickhouse-server");
+ commands.add("start");
+ ShellUtils.execWithStatus(workPath, commands, 300L, logger);
+ logger.info("clickhouse start success");
+ }
+
+ ExecResult startResult = serviceHandler.start(command.getStartRunner(), command.getStatusRunner(),
+ command.getDecompressPackageName(), command.getRunAs());
+ return startResult;
+ }
+}
+
+```
+worker打包替换旧包
+### 3、ck元数据文件
+```xml
+
+
+
+
+ trace
+ /var/log/clickhouse-server/clickhouse-server.log
+ /var/log/clickhouse-server/clickhouse-server.err.log
+
+ 1000M
+ 10
+
+
+
+
+
+
+
+
+
+
+
+
+
+ https://{bucket}.s3.amazonaws.com
+
+
+ https://{bucket}.storage.googleapis.com
+
+
+ https://{bucket}.oss.aliyuncs.com
+
+
+
+
+
+
+
+
+
+
+
+ 8123
+
+
+ ${tcpPort}
+
+
+ 9004
+
+
+ 9005
+
+
+
+
+
+
+
+
+
+
+
+ 9009
+
+
+
+
+
+
+
+
+
+
+
+
+ ::
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 4096
+
+
+ 3
+
+
+
+
+ false
+
+
+ /path/to/ssl_cert_file
+ /path/to/ssl_key_file
+
+
+ false
+
+
+ /path/to/ssl_ca_cert_file
+
+
+ none
+
+
+ 0
+
+
+ -1
+ -1
+
+
+ false
+
+
+
+
+
+
+
+
+
+ none
+ true
+ true
+ sslv2,sslv3
+ true
+
+
+
+ RejectCertificateHandler
+
+
+
+
+ true
+ true
+ sslv2,sslv3
+ true
+
+
+
+ RejectCertificateHandler
+
+
+
+
+
+
+
+
+ 0
+ 0
+
+
+ 1000
+
+
+ 0
+
+
+
+ 10000
+
+
+
+
+
+ 0.9
+
+
+ 4194304
+
+
+ 0
+
+
+
+
+
+ 8589934592
+
+
+ 5368709120
+
+
+
+ 1000
+
+
+ 134217728
+
+
+ 10000
+
+ false
+
+
+ /var/lib/clickhouse/
+
+
+
+
+
+
+ /var/lib/clickhouse/tmp/
+
+
+ 1
+ 1
+ 1
+
+
+ sha256_password
+
+
+ 12
+
+
+
+
+
+
+
+
+ /var/lib/clickhouse/user_files/
+
+
+
+
+
+
+
+
+
+
+
+
+ users.xml
+
+
+
+ /var/lib/clickhouse/access/
+
+
+
+
+
+
+
+ false
+
+
+ false
+
+
+ false
+
+
+ false
+
+
+ false
+
+
+ 600
+
+
+
+ default
+
+
+ SQL_
+
+
+
+
+
+
+
+
+ default
+
+
+
+
+
+
+
+
+ true
+
+
+ false
+
+ ' | sed -e 's|.*>\(.*\)<.*|\1|')
+ wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+ apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+ clickhouse-jdbc-bridge &
+
+ * [CentOS/RHEL]
+ export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
+ export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+ wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+ yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+ clickhouse-jdbc-bridge &
+
+ Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
+ ]]>
+
+
+
+
+
+ <#list itemList as item>
+ <#if item.name == "shardAddress">
+ <#list item.value?split(",") as shardAddress>
+
+
+ <#assign parts = shardAddress?split(":")>
+ ${parts?first}
+ ${parts?last}
+
+
+ </#list>
+ </#if>
+ </#list>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 3600
+
+
+
+ 3600
+
+
+ 60
+
+
+
+
+
+
+
+
+
+
+
+
+ system
+
+
+ toYYYYMM(event_date)
+
+
+
+
+
+
+
+ 7500
+
+ 1048576
+
+ 8192
+
+ 524288
+
+ false
+
+
+
+
+
+
+
+ system
+
+
+ toYYYYMM(event_date)
+ 7500
+ 1048576
+ 8192
+ 524288
+
+ false
+
+
+
+
+ system
+
+ toYYYYMM(event_date)
+ 7500
+ 1048576
+ 8192
+ 524288
+ false
+
+
+
+
+ system
+
+ toYYYYMM(event_date)
+ 7500
+
+
+
+
+ system
+
+ toYYYYMM(event_date)
+ 7500
+ 1048576
+ 8192
+ 524288
+ false
+
+
+
+
+
+
+ system
+
+ 7500
+ 1048576
+ 8192
+ 524288
+ 1000
+ false
+
+
+
+
+ system
+
+ 7000
+ 1048576
+ 8192
+ 524288
+ false
+
+
+
+
+
+
+ engine MergeTree
+ partition by toYYYYMM(finish_date)
+ order by (finish_date, finish_time_us, trace_id)
+
+ system
+
+ 7500
+ 1048576
+ 8192
+ 524288
+ false
+
+
+
+
+
+ system
+
+
+
+ 1000
+ 1024
+ 1024
+ 512
+ true
+
+
+
+
+
+
+
+ system
+
+
+ toYYYYMM(event_date)
+ 7500
+ 1048576
+ 8192
+ 524288
+ false
+
+
+
+
+ system
+
+
+ 7500
+ 1048576
+ 8192
+ 524288
+ false
+ event_date
+ event_date + INTERVAL 3 DAY
+
+
+
+
+ system
+
+ toYYYYMM(event_date)
+ 7500
+
+
+
+
+
+
+
+
+
+ *_dictionary.*ml
+
+
+ *_function.*ml
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ /clickhouse/task_queue/ddl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ click_cost
+ any
+
+ 0
+ 3600
+
+
+ 86400
+ 60
+
+
+
+ max
+
+ 0
+ 60
+
+
+ 3600
+ 300
+
+
+ 86400
+ 3600
+
+
+
+
+
+ /var/lib/clickhouse/format_schemas/
+
+
+
+
+
+
+
+
+
+ false
+
+ false
+
+
+ https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277
+
+
+
+
+
+
+
+
+
+
+ 1073741824
+ 1024
+ 1048576
+ 30000000
+
+
+
+
+
+
+
+
+
+
+
+
+
+```
+```xml
+
+
+
+
+
+
+
+
+
+
+
+ 1
+
+
+
+
+
+
+ <${username}>
+
+ ${password}
+
+
+
+ ::/0
+
+
+
+ default
+
+
+ default
+
+
+
+ </${username}>
+
+
+
+
+
+
+
+
+
+ 3600
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+
+
+```
+```json
+{
+ "name": "CLICKHOUSE",
+ "label": "ClickHouse",
+ "description": "联机分析(OLAP)列式数据库",
+ "version": "23.9.1.1854",
+ "sortNum": 29,
+ "dependencies":[
+ "ZOOKEEPER"
+ ],
+ "packageName": "clickhouse-23.9.1.1854.tar.gz",
+ "decompressPackageName": "clickhouse-23.9.1.1854",
+ "roles": [
+ {
+ "name": "ClickHouse",
+ "label": "ClickHouse",
+ "roleType": "master",
+ "cardinality": "1+",
+ "logFile": "/var/log/clickhouse-server/clickhouse-server.log",
+ "jmxPort": "",
+ "startRunner": {
+ "timeout": "60",
+ "program": "/etc/init.d/clickhouse-server",
+ "args": [
+ "start"
+ ]
+ },
+ "stopRunner": {
+ "timeout": "60",
+ "program": "/etc/init.d/clickhouse-server",
+ "args": [
+ "stop"
+ ]
+ },
+ "restartRunner": {
+ "timeout": "60",
+ "program": "/etc/init.d/clickhouse-server",
+ "args": [
+ "restart"
+ ]
+ },
+ "statusRunner": {
+ "timeout": "60",
+ "program": "bin/status.sh",
+ "args": []
+ }
+ }
+ ],
+ "configWriter": {
+ "generators": [
+ {
+ "filename": "config.xml",
+ "configFormat": "custom",
+ "outputDirectory": "etc",
+ "templateName": "clickhouse-server-config.flt",
+ "includeParams": [
+ "tcpPort",
+ "shardAddress",
+ "zkAddress"
+ ]
+ },
+ {
+ "filename": "users.xml",
+ "configFormat": "custom",
+ "outputDirectory": "etc",
+ "templateName": "clickhouse-user.flt",
+ "includeParams": [
+ "username",
+ "password"
+ ]
+ }
+ ]
+ },
+ "parameters": [
+ {
+ "name": "tcpPort",
+ "label": "tcp端口",
+ "description": "tcp端口",
+ "required": true,
+ "configType": "map",
+ "type": "input",
+ "value": "9010",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "9010"
+ },
+ {
+ "name": "shardAddress",
+ "label": "clickhouse所有分片地址",
+ "description": "clickhouse所有分片地址",
+ "required": true,
+ "type": "multiple",
+ "separator": ",",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "zkAddress",
+ "label": "zookeeper地址",
+ "description": "zookeeper地址",
+ "required": true,
+ "type": "multiple",
+ "separator": ",",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "username",
+ "label": "用户名",
+ "description": "用户名",
+ "required": true,
+ "configType": "map",
+ "type": "input",
+ "value": "default",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "default"
+ },
+ {
+ "name": "password",
+ "label": "密码",
+ "description": "密码",
+ "required": true,
+ "configType": "map",
+ "type": "input",
+ "value": "123456",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "123456"
+ }
+ ]
+}
+```
+部署元数据文件
+### 4、重启
+各节点worker重启
+```shell
+sh /opt/datasophon/datasophon-worker/bin/datasophon-worker.sh restart worker debug
+```
+主节点重启api
+```shell
+sh /opt/apps/datasophon-manager-1.2.0/bin/datasophon-api.sh restart api debug
+```
+### 5、clickhouse服务器完全卸载命令
+```shell
+pkill clickhouse
+rm -rf /etc/clickhouse-server
+rm -rf /etc/clickhouse-client
+rm -rf /usr/bin/clickhouse*
+rm -rf /var/log/clickhouse-server
+rm -rf /var/lib/clickhouse
+rm -rf /var/run/clickhouse-server
+rm -rf /opt/datasophon/clickhouse-23.9.1.1854
+rm -rf /opt/datasophon/clickhouse
+```
+### 6、配置样例
+![image](https://github.com/datavane/datasophon/assets/62798940/b2915f2e-3b2e-4e06-bcfe-b40d57bd1607)
\ No newline at end of file
diff --git "a/docs/zh/datasophon\351\233\206\346\210\220presto.md" "b/docs/zh/datasophon\351\233\206\346\210\220presto.md"
new file mode 100644
index 00000000..cd7a3c3d
--- /dev/null
+++ "b/docs/zh/datasophon\351\233\206\346\210\220presto.md"
@@ -0,0 +1,1888 @@
+### 1、打包安装包
+解压安装包,可以在这里对解压后的文件做修改,更改文件名,这里的文件名是和后面的配置文件对应的:
+```shell
+tar -zxvf presto-server-0.283.tar.gz
+mv presto-server-0.283 presto-0.283
+```
+将文件打包,注意这里的压缩包名也和后面配置文件对应:
+```shell
+tar czf presto-0.283.tar.gz presto-0.283
+```
+编写md5文件:
+```shell
+md5sum presto-0.283.tar.gz
+echo '84666ba9ef9b9024fa7c385af0823101' > presto-0.283.tar.gz.md5
+```
+将两个文件拷贝进对应文件夹中:
+```shell
+cp ./presto-0.283.tar.gz ./presto-0.283.tar.gz.md5 /opt/datasophon/DDP/packages
+```
+### 2、编写presto元数据
+```shell
+cd /opt/apps/datasophon/datasophon-manager-1.1.2/conf/meta/DDP-1.1.2
+mkdir PRESTO
+cd PRESTO
+vim service_ddl.json
+```
+```json
+{
+ "name": "PRESTO",
+ "label": "Presto",
+ "description": "分布式SQL交互式查询引擎",
+ "version": "0.283",
+ "sortNum": 21,
+ "dependencies": [],
+ "packageName": "presto-0.283.tar.gz",
+ "decompressPackageName": "presto-0.283",
+ "roles": [
+ {
+ "name": "PrestoCoordinator",
+ "label": "PrestoCoordinator",
+ "roleType": "master",
+ "cardinality": "1",
+ "jmxPort": 8087,
+ "logFile": "data/var/log/server.log",
+ "startRunner": {
+ "timeout": "60",
+ "program": "bin/launcher",
+ "args": [
+ "start"
+ ]
+ },
+ "stopRunner": {
+ "timeout": "600",
+ "program": "bin/launcher",
+ "args": [
+ "stop"
+ ]
+ },
+ "statusRunner": {
+ "timeout": "60",
+ "program": "bin/launcher",
+ "args": [
+ "status"
+ ]
+ },
+ "restartRunner": {
+ "timeout": "60",
+ "program": "bin/launcher",
+ "args": [
+ "restart"
+ ]
+ },
+ "externalLink": {
+ "name": "Presto UI",
+ "label": "Presto UI",
+ "url": "http://${host}:7777"
+ }
+ },
+ {
+ "name": "PrestoWorker",
+ "label": "PrestoWorker",
+ "roleType": "worker",
+ "cardinality": "1+",
+ "jmxPort": 8089,
+ "logFile": "data/var/log/server.log",
+ "startRunner": {
+ "timeout": "60",
+ "program": "bin/launcher",
+ "args": [
+ "start"
+ ]
+ },
+ "stopRunner": {
+ "timeout": "600",
+ "program": "bin/launcher",
+ "args": [
+ "stop"
+ ]
+ },
+ "statusRunner": {
+ "timeout": "60",
+ "program": "bin/launcher",
+ "args": [
+ "status"
+ ]
+ },
+ "restartRunner": {
+ "timeout": "60",
+ "program": "bin/launcher",
+ "args": [
+ "restart"
+ ]
+ }
+ }
+ ],
+ "configWriter": {
+ "generators": [
+ {
+ "filename": "config.properties",
+ "configFormat": "properties",
+ "outputDirectory": "etc",
+ "includeParams": [
+ "coordinator",
+ "http-server.http.port",
+ "query.max-memory-per-node",
+ "query.max-memory",
+ "discovery.uri",
+ "custom.config.properties"
+ ]
+ },
+ {
+ "filename": "jvm.config",
+ "configFormat": "custom",
+ "outputDirectory": "etc",
+ "templateName": "presto.jvm.config.ftl",
+ "includeParams": [
+ "prestoHeapSize"
+ ]
+ },
+ {
+ "filename": "node.properties",
+ "configFormat": "properties",
+ "outputDirectory": "etc",
+ "includeParams": [
+ "node.data-dir",
+ "node.environment"
+ ]
+ },
+ {
+ "filename": "hive.properties",
+ "configFormat": "properties",
+ "outputDirectory": "etc/catalog",
+ "includeParams": [
+ "custom.hive.properties"
+ ]
+ }
+ ]
+ },
+ "parameters": [
+ {
+ "name": "coordinator",
+ "label": "coordinator",
+ "description": "coordinator",
+ "required": true,
+ "type": "input",
+ "value": "false",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "false"
+ },
+ {
+ "name": "prestoHeapSize",
+ "label": "Presto最大堆内存",
+ "description": "Presto最大堆内存",
+ "configType": "map",
+ "required": true,
+ "minValue": 0,
+ "maxValue": 64,
+ "type": "slider",
+ "value": "",
+ "unit": "GB",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "32"
+ },
+ {
+ "name": "http-server.http.port",
+ "label": "Presto Http端口",
+ "description": "",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "7777"
+ },
+ {
+ "name": "discovery.uri",
+ "label": "服务发现地址",
+ "description": "",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "http://${coordinatorHost}:7777"
+ },
+ {
+ "name": "query.max-memory-per-node",
+ "label": "每个查询在单个节点可使用最大内存",
+ "description": "",
+ "required": true,
+ "type": "input",
+ "minValue": 0,
+ "maxValue": "30",
+ "value": "",
+ "unit": "GB",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "1GB"
+ },
+ {
+ "name": "query.max-memory",
+ "label": "总共可使用最大内存",
+ "description": "若query.max-memory-per-node = 30GB则query.max-memory = <30GB *节点数>",
+ "required": true,
+ "type": "input",
+ "minValue": 0,
+ "maxValue": "30",
+ "value": "",
+ "unit": "GB",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "30GB"
+ },
+ {
+ "name": "node.data-dir",
+ "label": "日志存储地址",
+ "description": "",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "${INSTALL_PATH}/presto-0.283/data"
+ },
+ {
+ "name": "node.environment",
+ "label": "集群环境名称",
+ "description": "",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "production"
+ },
+ {
+ "name": "custom.config.properties",
+ "label": "自定义配置config.properties",
+ "description": "自定义配置",
+ "configType": "custom",
+ "required": false,
+ "type": "multipleWithKey",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "custom.hive.properties",
+ "label": "自定义配置hive.properties",
+ "description": "自定义配置",
+ "configType": "custom",
+ "required": false,
+ "type": "multipleWithKey",
+ "value": [{"connector.name":"hive-hadoop2"},{"hive.metastore.uri":"thrift://${metastoreHost}:9083"},{"hive.config.resources":"${INSTALL_PATH}/hadoop-3.3.3/etc/hadoop/core-site.xml,${INSTALL_PATH}/hadoop-3.3.3/etc/hadoop/hdfs-site.xml"}],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": [{"connector.name":"hive-hadoop2"},{"hive.metastore.uri":"thrift://${metastoreHost}:9083"},{"hive.config.resources":"${INSTALL_PATH}/hadoop-3.3.3/etc/hadoop/core-site.xml,${INSTALL_PATH}/hadoop-3.3.3/etc/hadoop/hdfs-site.xml"}]
+ }
+ ]
+}
+```
+```shell
+cd /opt/datasophon/datasophon-worker/conf/templates
+vim presto.jvm.config.ftl
+```
+```shell
+-server
+-Xmx${prestoHeapSize}G
+-XX:-UseBiasedLocking
+-XX:G1HeapRegionSize=32M
+-XX:+ExplicitGCInvokesConcurrent
+-XX:+ExitOnOutOfMemoryError
+-XX:+HeapDumpOnOutOfMemoryError
+-XX:-OmitStackTraceInFastThrow
+-XX:ReservedCodeCacheSize=512M
+-XX:PerMethodRecompilationCutoff=10000
+-XX:PerBytecodeRecompilationCutoff=10000
+-Djdk.attach.allowAttachSelf=true
+-Djdk.nio.maxCachedBufferSize=2000000
+-XX:+UnlockDiagnosticVMOptions
+-XX:+UseAESCTRIntrinsics
+```
+### 3、修改worker源码,重新打包worker包
+修改 datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java
+新增代码
+
+![image](https://github.com/datavane/datasophon/assets/62798940/0fbf7d09-e351-4789-9aff-f911610e117f)
+
+```java
+ if ("PrestoCoordinator".equals(serviceRoleName) && "coordinator".equals(config.getName())) {
+ logger.info("Start config presto coordinator");
+ config.setValue("true");
+ ServiceConfig serviceConfig = new ServiceConfig();
+ serviceConfig.setName("node-scheduler.include-coordinator");
+ serviceConfig.setValue("false");
+ ServiceConfig serviceConfig1 = new ServiceConfig();
+ serviceConfig1.setName("discovery-server.enabled");
+ serviceConfig1.setValue("true");
+ customConfList.add(serviceConfig);
+ customConfList.add(serviceConfig1);
+ }
+```
+将重新打包的 datasophon-worker-1.1.2.jar 文件替换到每个worker节点的 /opt/datasophon/datasophon-worker/lib
+1.2.0版本worker包名为datasophon-worker-1.1.3.jar,需要上传后改名
+### 4、重启
+各节点worker重启
+```shell
+sh /opt/datasophon/datasophon-worker/bin/datasophon-worker.sh restart worker
+```
+主节点重启api
+```shell
+sh /opt/apps/datasophon/datasophon-manager-1.1.2/bin/datasophon-api.sh restart api
+```
+此时可以看到mysql元数据库中 t_ddh_frame_service 和 t_ddh_frame_service_role 两个表已经添加了presto的元数据。
+搭建时需要注意:同一个节点不能既部署PrestoCoordinator又部署PrestoWorker
+### 5、集成监控
+#### 5.1 presto安装目录创建jmx配置文件
+```shell
+pwd
+/opt/datasophon/presto
+mkdir jmx
+cd jmx
+vim prometheus_config.yml
+```
+```yaml
+---
+lowercaseOutputLabelNames: true
+lowercaseOutputName: true
+whitelistObjectNames: ["java.lang:type=OperatingSystem"]
+blacklistObjectNames: []
+rules:
+ - pattern: 'java.lang<>(committed_virtual_memory|free_physical_memory|free_swap_space|total_physical_memory|total_swap_space)_size:'
+ name: os_$1_bytes
+ type: GAUGE
+ attrNameSnakeCase: true
+ - pattern: 'java.lang<>((?!process_cpu_time)\w+):'
+ name: os_$1
+ type: GAUGE
+ attrNameSnakeCase: true
+```
+将 jmx_prometheus_javaagent-0.16.1.jar 放入jmx文件夹
+
+![image](https://github.com/datavane/datasophon/assets/62798940/16b9dd5d-8957-45b6-b0fc-163e47d49a25)
+
+#### 5.2 修改presto启动脚本 /opt/datasophon/presto/bin/launcher.py
+
+![image](https://github.com/datavane/datasophon/assets/62798940/820fda3d-860d-4817-a687-ffa37cf5f6a3)
+
+```python
+#!/usr/bin/env python
+
+import errno
+import os
+import platform
+import subprocess
+import sys
+import traceback
+
+from fcntl import flock, LOCK_EX, LOCK_NB
+from optparse import OptionParser
+from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND
+from os.path import basename, dirname, exists, realpath
+from os.path import join as pathjoin
+from signal import SIGTERM, SIGKILL
+from stat import S_ISLNK
+from time import sleep
+
+COMMANDS = ['run', 'start', 'stop', 'restart', 'kill', 'status']
+
+LSB_NOT_RUNNING = 3
+LSB_STATUS_UNKNOWN = 4
+
+
+def find_install_path(f):
+ """Find canonical parent of bin/launcher.py"""
+ if basename(f) != 'launcher.py':
+ raise Exception("Expected file '%s' to be 'launcher.py' not '%s'" % (f, basename(f)))
+ p = realpath(dirname(f))
+ if basename(p) != 'bin':
+ raise Exception("Expected file '%s' directory to be 'bin' not '%s" % (f, basename(p)))
+ return dirname(p)
+
+
+def makedirs(p):
+ """Create directory and all intermediate ones"""
+ try:
+ os.makedirs(p)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def load_properties(f):
+ """Load key/value pairs from a file"""
+ properties = {}
+ for line in load_lines(f):
+ k, v = line.split('=', 1)
+ properties[k.strip()] = v.strip()
+ return properties
+
+
+def load_lines(f):
+ """Load lines from a file, ignoring blank or comment lines"""
+ lines = []
+ for line in open(f, 'r').readlines():
+ line = line.strip()
+ if len(line) > 0 and not line.startswith('#'):
+ lines.append(line)
+ return lines
+
+
+def try_lock(f):
+ """Try to open an exclusive lock (inheritable) on a file"""
+ try:
+ flock(f, LOCK_EX | LOCK_NB)
+ return True
+ except (IOError, OSError): # IOError in Python 2, OSError in Python 3.
+ return False
+
+
+def open_pidfile(f, mode):
+ """Open file in read/write mode (without truncating it)"""
+ fd = os.open(f, O_RDWR | O_CREAT, mode)
+ if hasattr(os, 'set_inheritable'):
+ # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors
+ # Since Python 3.4
+ os.set_inheritable(fd, True)
+ return os.fdopen(fd, 'r+')
+
+
+class Process:
+ def __init__(self, path):
+ makedirs(dirname(path))
+ self.path = path
+ self.pid_file = open_pidfile(path, 0o600)
+ self.refresh()
+
+ def refresh(self):
+ self.locked = try_lock(self.pid_file)
+
+ def clear_pid(self):
+ assert self.locked, 'pid file not locked by us'
+ self.pid_file.seek(0)
+ self.pid_file.truncate()
+
+ def write_pid(self, pid):
+ self.clear_pid()
+ self.pid_file.write(str(pid) + '\n')
+ self.pid_file.flush()
+
+ def alive(self):
+ self.refresh()
+ if self.locked:
+ return False
+
+ pid = self.read_pid()
+ try:
+ os.kill(pid, 0)
+ return True
+ except OSError as e:
+ raise Exception('Signaling pid %s failed: %s' % (pid, e))
+
+ def read_pid(self):
+ assert not self.locked, 'pid file is locked by us'
+ self.pid_file.seek(0)
+ line = self.pid_file.readline().strip()
+ if len(line) == 0:
+ raise Exception("Pid file '%s' is empty" % self.path)
+
+ try:
+ pid = int(line)
+ except ValueError:
+ raise Exception("Pid file '%s' contains garbage: %s" % (self.path, line))
+ if pid <= 0:
+ raise Exception("Pid file '%s' contains an invalid pid: %s" % (self.path, pid))
+ return pid
+
+
+def redirect_stdin_to_devnull():
+ """Redirect stdin to /dev/null"""
+ fd = os.open(os.devnull, O_RDWR)
+ os.dup2(fd, sys.stdin.fileno())
+ os.close(fd)
+
+
+def open_append(f):
+ """Open a raw file descriptor in append mode"""
+ # noinspection PyTypeChecker
+ return os.open(f, O_WRONLY | O_APPEND | O_CREAT, 0o644)
+
+
+def redirect_output(fd):
+ """Redirect stdout and stderr to a file descriptor"""
+ os.dup2(fd, sys.stdout.fileno())
+ os.dup2(fd, sys.stderr.fileno())
+
+
+def symlink_exists(p):
+ """Check if symlink exists and raise if another type of file exists"""
+ try:
+ st = os.lstat(p)
+ if not S_ISLNK(st.st_mode):
+ raise Exception('Path exists and is not a symlink: %s' % p)
+ return True
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ return False
+
+
+def create_symlink(source, target):
+ """Create a symlink, removing the target first if it is a symlink"""
+ if symlink_exists(target):
+ os.remove(target)
+ if exists(source):
+ os.symlink(source, target)
+
+
+def create_app_symlinks(options):
+ """
+ Symlink the 'etc' and 'plugin' directory into the data directory.
+
+ This is needed to support programs that reference 'etc/xyz' from within
+ their config files: log.levels-file=etc/log.properties
+ """
+ if options.etc_dir != pathjoin(options.data_dir, 'etc'):
+ create_symlink(
+ options.etc_dir,
+ pathjoin(options.data_dir, 'etc'))
+
+ if options.install_path != options.data_dir:
+ create_symlink(
+ pathjoin(options.install_path, 'plugin'),
+ pathjoin(options.data_dir, 'plugin'))
+
+
+def build_java_execution(options, daemon):
+ if not exists(options.config_path):
+ raise Exception('Config file is missing: %s' % options.config_path)
+ if not exists(options.jvm_config):
+ raise Exception('JVM config file is missing: %s' % options.jvm_config)
+ if not exists(options.launcher_config):
+ raise Exception('Launcher config file is missing: %s' % options.launcher_config)
+ if options.log_levels_set and not exists(options.log_levels):
+ raise Exception('Log levels file is missing: %s' % options.log_levels)
+
+ with open(os.devnull, 'w') as devnull:
+ try:
+ subprocess.check_call(['java', '-version'], stdout=devnull, stderr=devnull)
+ except (OSError, subprocess.CalledProcessError):
+ raise Exception('Java is not installed')
+
+ properties = options.properties.copy()
+
+ if exists(options.log_levels):
+ properties['log.levels-file'] = options.log_levels
+
+ if daemon:
+ properties['log.output-file'] = options.server_log
+ properties['log.enable-console'] = 'false'
+
+ jvm_properties = load_lines(options.jvm_config)
+ launcher_properties = load_properties(options.launcher_config)
+
+ try:
+ main_class = launcher_properties['main-class']
+ except KeyError:
+ raise Exception("Launcher config is missing 'main-class' property")
+
+ properties['config'] = options.config_path
+
+ system_properties = ['-D%s=%s' % i for i in properties.items()]
+ classpath = pathjoin(options.install_path, 'lib', '*')
+
+ command = ['java', '-cp', classpath]
+ command += jvm_properties + options.jvm_options + system_properties
+ config_properties = {}
+ if exists(options.config_path):
+ config_properties = load_properties(options.config_path)
+ if config_properties['coordinator'] == 'true':
+            print('coordinator true')
+ command += ['-javaagent:/opt/datasophon/presto/jmx/jmx_prometheus_javaagent-0.16.1.jar=7778:/opt/datasophon/presto/jmx/prometheus_config.yml']
+ else:
+ command += ['-javaagent:/opt/datasophon/presto/jmx/jmx_prometheus_javaagent-0.16.1.jar=7779:/opt/datasophon/presto/jmx/prometheus_config.yml']
+ command += [main_class]
+ if options.verbose:
+ print(command)
+ print("")
+
+ env = os.environ.copy()
+
+ # set process name: https://github.com/airlift/procname
+ process_name = launcher_properties.get('process-name', '')
+ if len(process_name) > 0:
+ system = platform.system() + '-' + platform.machine()
+ shim = pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so')
+ if exists(shim):
+ env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':' + shim).strip()
+ env['PROCNAME'] = process_name
+
+ return command, env
+
+
+def run(process, options):
+ if process.alive():
+ print('Already running as %s' % process.read_pid())
+ return
+
+ create_app_symlinks(options)
+ args, env = build_java_execution(options, False)
+
+ makedirs(options.data_dir)
+ os.chdir(options.data_dir)
+
+ process.write_pid(os.getpid())
+
+ redirect_stdin_to_devnull()
+
+ os.execvpe(args[0], args, env)
+
+
+def start(process, options):
+ if process.alive():
+ print('Already running as %s' % process.read_pid())
+ return
+
+ create_app_symlinks(options)
+ args, env = build_java_execution(options, True)
+
+ makedirs(dirname(options.launcher_log))
+ log = open_append(options.launcher_log)
+
+ makedirs(options.data_dir)
+ os.chdir(options.data_dir)
+
+ pid = os.fork()
+ if pid > 0:
+ process.write_pid(pid)
+ print('Started as %s' % pid)
+ return
+
+ os.setsid()
+
+ redirect_stdin_to_devnull()
+ redirect_output(log)
+ os.close(log)
+
+ os.execvpe(args[0], args, env)
+
+
+def terminate(process, signal, message):
+ if not process.alive():
+ print('Not running')
+ return
+
+ pid = process.read_pid()
+
+ while True:
+ try:
+ os.kill(pid, signal)
+ except OSError as e:
+ if e.errno != errno.ESRCH:
+ raise Exception('Signaling pid %s failed: %s' % (pid, e))
+
+ if not process.alive():
+ process.clear_pid()
+ break
+
+ sleep(0.1)
+
+ print('%s %s' % (message, pid))
+
+
+def stop(process):
+ terminate(process, SIGTERM, 'Stopped')
+
+
+def kill(process):
+ terminate(process, SIGKILL, 'Killed')
+
+
+def status(process):
+ if not process.alive():
+ print('Not running')
+ sys.exit(LSB_NOT_RUNNING)
+ print('Running as %s' % process.read_pid())
+
+
+def handle_command(command, options):
+ process = Process(options.pid_file)
+ if command == 'run':
+ run(process, options)
+ elif command == 'start':
+ start(process, options)
+ elif command == 'stop':
+ stop(process)
+ elif command == 'restart':
+ stop(process)
+ start(process, options)
+ elif command == 'kill':
+ kill(process)
+ elif command == 'status':
+ status(process)
+ else:
+ raise AssertionError('Unhandled command: ' + command)
+
+
+def create_parser():
+ commands = 'Commands: ' + ', '.join(COMMANDS)
+ parser = OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands)
+ parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely')
+ parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc')
+ parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties')
+ parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties')
+ parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config')
+ parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties')
+ parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties')
+ parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH')
+ parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid')
+ parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon mode)')
+ parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in daemon mode)')
+ parser.add_option('-J', action='append', metavar='OPT', dest='jvm_options', help='Set a JVM option')
+ parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java system property')
+ return parser
+
+
+def parse_properties(parser, args):
+ properties = {}
+ for arg in args:
+ if '=' not in arg:
+ parser.error('property is malformed: %s' % arg)
+ key, value = [i.strip() for i in arg.split('=', 1)]
+ if key == 'config':
+ parser.error('cannot specify config using -D option (use --config)')
+ if key == 'log.output-file':
+ parser.error('cannot specify server log using -D option (use --server-log-file)')
+ if key == 'log.levels-file':
+ parser.error('cannot specify log levels using -D option (use --log-levels-file)')
+ properties[key] = value
+ return properties
+
+
+def print_options(options):
+ if options.verbose:
+ for i in sorted(vars(options)):
+ print("%-15s = %s" % (i, getattr(options, i)))
+ print("")
+
+
+class Options:
+ pass
+
+
+def main():
+ parser = create_parser()
+
+ (options, args) = parser.parse_args()
+
+ if len(args) != 1:
+ if len(args) == 0:
+ parser.error('command name not specified')
+ else:
+ parser.error('too many arguments')
+ command = args[0]
+
+ if command not in COMMANDS:
+ parser.error('unsupported command: %s' % command)
+
+ try:
+ install_path = find_install_path(sys.argv[0])
+ except Exception as e:
+ print('ERROR: %s' % e)
+ sys.exit(LSB_STATUS_UNKNOWN)
+
+ o = Options()
+ o.verbose = options.verbose
+ o.install_path = install_path
+ o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties'))
+ o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc'))
+ o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties'))
+ o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config'))
+ o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties'))
+ o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties'))
+ o.log_levels_set = bool(options.log_levels_file)
+ o.jvm_options = options.jvm_options or []
+
+ if options.node_config and not exists(o.node_config):
+ parser.error('Node config file is missing: %s' % o.node_config)
+
+ node_properties = {}
+ if exists(o.node_config):
+ node_properties = load_properties(o.node_config)
+
+ data_dir = node_properties.get('node.data-dir')
+ o.data_dir = realpath(options.data_dir or data_dir or o.install_path)
+
+ o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid'))
+ o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log'))
+ o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log'))
+
+ o.properties = parse_properties(parser, options.properties or {})
+ for k, v in node_properties.items():
+ if k not in o.properties:
+ o.properties[k] = v
+
+ if o.verbose:
+ print_options(o)
+
+ try:
+ handle_command(command, o)
+ except SystemExit:
+ raise
+ except Exception as e:
+ if o.verbose:
+ traceback.print_exc()
+ else:
+ print('ERROR: %s' % e)
+ sys.exit(LSB_STATUS_UNKNOWN)
+
+
+if __name__ == '__main__':
+ main()
+
+```
+#### 5.3 修改Prometheus配置文件
+```shell
+vim /opt/datasophon/prometheus/prometheus.yml
+```
+新增presto配置
+```shell
+ - job_name: 'prestocoordinator'
+ file_sd_configs:
+ - files:
+ - configs/prestocoordinator.json
+ - job_name: 'prestoworker'
+ file_sd_configs:
+ - files:
+ - configs/prestoworker.json
+```
+在 /opt/datasophon/prometheus/configs 目录新增 prestocoordinator.json 和 prestoworker.json 配置文件
+```shell
+[
+ {
+ "targets":["hadoop1:7778"]
+ }
+]
+```
+```shell
+[
+ {
+ "targets":["hadoop2:7779","hadoop3:7779"]
+ }
+]
+```
+重启prometheus,访问webui可看到采集过来的指标
+[http://hadoop1:9090/targets](http://hadoop1:9090/targets)
+
+![image](https://github.com/datavane/datasophon/assets/62798940/f93a3ad1-64c6-463c-b989-c7c7af93cd82)
+
+#### 5.4 绘制grafana
+打开grafana ui
+
+![image](https://github.com/datavane/datasophon/assets/62798940/369c0997-5a5e-44ce-bcc8-5163360b240c)
+
+将下面json粘贴进去
+```shell
+{
+ "annotations": {
+ "list": [
+ {
+ "$$hashKey": "object:7978",
+ "builtIn": 1,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "",
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "gnetId": 10866,
+ "graphTooltip": 0,
+ "id": 42,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [
+ {
+ "options": {
+ "match": "null",
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "dateTimeAsIso"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 4,
+ "w": 6,
+ "x": 0,
+ "y": 0
+ },
+ "id": 16,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": {
+ "valueSize": 38
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "process_start_time_seconds{job=\"prestocoordinator\"}*1000",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "PrestoCoordinator启动时间",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [
+ {
+ "options": {
+ "match": "null",
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 4,
+ "w": 4,
+ "x": 6,
+ "y": 0
+ },
+ "id": 34,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": {
+ "valueSize": 38
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "time() - process_start_time_seconds{job=\"prestocoordinator\"}",
+ "interval": "",
+ "legendFormat": "",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "PrestoCoordinator运行时长",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [
+ {
+ "options": {
+ "match": "null",
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 4,
+ "w": 4,
+ "x": 10,
+ "y": 0
+ },
+ "id": 20,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": {
+ "valueSize": 38
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "jvm_memory_bytes_max{job=\"prestocoordinator\",area=\"heap\"}",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Presto最大堆内存",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "%"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 4,
+ "w": 4,
+ "x": 14,
+ "y": 0
+ },
+ "id": 28,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "9.1.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "jvm_memory_bytes_used{area=\"heap\",job=\"prestocoordinator\"}*100/jvm_memory_bytes_max{area=\"heap\",job=\"prestocoordinator\"}",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "PrestoCoordinator堆内存使用率",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [
+ {
+ "options": {
+ "match": "null",
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 4,
+ "w": 4,
+ "x": 18,
+ "y": 0
+ },
+ "id": 24,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": {
+ "valueSize": 38
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "sum(up{job=\"prestoworker\"})",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "在线Worker数",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Usage %"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "bars"
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6d1f62",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.lineWidth",
+ "value": 0
+ },
+ {
+ "id": "unit",
+ "value": "percentunit"
+ },
+ {
+ "id": "min",
+ "value": 0
+ },
+ {
+ "id": "max",
+ "value": 1
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 4
+ },
+ "id": 18,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.6",
+ "repeat": "memarea",
+ "repeatDirection": "h",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "jvm_memory_bytes_used{area=\"heap\",job=\"prestocoordinator\"}",
+ "legendFormat": "已用内存",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": " jvm_memory_bytes_max{area=\"heap\",job=\"prestocoordinator\"}",
+ "hide": false,
+ "legendFormat": "总内存",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "jvm_memory_bytes_used{area=\"heap\",job=\"prestocoordinator\"} / jvm_memory_bytes_max >= 0",
+ "hide": false,
+ "legendFormat": "使用率",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "PrestoCoordinator堆内存使用趋势",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 4
+ },
+ "id": 26,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "increase(jvm_gc_collection_seconds_sum{job=\"prestocoordinator\"}[$__interval])",
+ "format": "time_series",
+ "interval": "60s",
+ "intervalFactor": 1,
+ "legendFormat": "{{gc}}",
+ "metric": "jvm_gc_collection_seconds_sum",
+ "range": true,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "title": "PrestoCoordinator GC时间趋势图",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "color-text",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "instance"
+ },
+ "properties": [
+ {
+ "id": "displayName",
+ "value": "PrestoWorker"
+ },
+ {
+ "id": "unit",
+ "value": "short"
+ },
+ {
+ "id": "decimals",
+ "value": 2
+ },
+ {
+ "id": "custom.align"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value"
+ },
+ "properties": [
+ {
+ "id": "displayName",
+ "value": "堆内存使用率"
+ },
+ {
+ "id": "custom.align",
+ "value": "left"
+ },
+ {
+ "id": "custom.displayMode",
+ "value": "lcd-gauge"
+ },
+ {
+ "id": "min",
+ "value": 1
+ },
+ {
+ "id": "max",
+ "value": 100
+ },
+ {
+ "id": "thresholds",
+ "value": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "yellow",
+ "value": 80
+ },
+ {
+ "color": "red",
+ "value": 90
+ }
+ ]
+ }
+ },
+ {
+ "id": "color",
+ "value": {
+ "mode": "continuous-GrYlRd"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "area"
+ },
+ "properties": [
+ {
+ "id": "custom.hidden",
+ "value": true
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "job"
+ },
+ "properties": [
+ {
+ "id": "custom.hidden",
+ "value": true
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Time"
+ },
+ "properties": [
+ {
+ "id": "custom.hidden",
+ "value": true
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 0,
+ "y": 13
+ },
+ "id": 36,
+ "interval": "",
+ "links": [],
+ "options": {
+ "footer": {
+ "enablePagination": true,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "frameIndex": 0,
+ "showHeader": true,
+ "sortBy": [
+ {
+ "desc": true,
+ "displayName": "PrestoWorker"
+ }
+ ]
+ },
+ "pluginVersion": "9.1.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "editorMode": "code",
+ "expr": "jvm_memory_bytes_used{area=\"heap\",job=\"prestoworker\"}*100/jvm_memory_bytes_max{area=\"heap\",job=\"prestoworker\"}",
+ "format": "table",
+ "instant": true,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "broker",
+ "refId": "A"
+ }
+ ],
+ "title": "PrestoWorker内存使用率",
+ "transformations": [],
+ "type": "table"
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 37,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "isNone": true,
+ "selected": false,
+ "text": "None",
+ "value": ""
+ },
+ "datasource": {
+ "type": "prometheus",
+ "uid": "hj6gjW44z"
+ },
+ "definition": "label_values(up{job=\"trino\"},instance)",
+ "hide": 0,
+ "includeAll": false,
+ "label": "节点",
+ "multi": false,
+ "name": "node",
+ "options": [],
+ "query": {
+ "query": "label_values(up{job=\"trino\"},instance)",
+ "refId": "Prometheus-node-Variable-Query"
+ },
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-6h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Presto",
+ "uid": "7Iy7ibMIz",
+ "version": 13,
+ "weekStart": ""
+}
+```
+#### 5.5 添加datasophon presto模块总览
+在grafana中复制面板链接
+
+![image](https://github.com/datavane/datasophon/assets/62798940/02443af5-90ff-4dc8-9cbd-d42fee7b2ca4)
+
+打开datasophon mysql t_ddh_cluster_service_dashboard表,添加presto面板
+注意复制的面板链接后面要拼上&kiosk,如下图:
+
+![image](https://github.com/datavane/datasophon/assets/62798940/977f9796-00ca-4016-82a9-d0f663659a00)
+
+集成好的监控长这样
+
+![image](https://github.com/datavane/datasophon/assets/62798940/d15fcc17-16bf-4604-acf8-014f29ae7713)
+
+#### 5.6 集成告警
+在 /opt/datasophon/prometheus/alert_rules 目录中添加presto告警配置文件 presto.yml
+```shell
+groups:
+- name: PRESTO
+ # rules:定义规则
+ rules:
+ # alert:告警规则的名称
+ - alert: PrestoCoordinator进程存活
+ expr: up{job="prestocoordinator"} != 1
+ for: 15s
+ labels:
+ # severity: 指定告警级别。有三种等级,分别为warning、critical和emergency。严重等级依次递增。
+ severity: exception
+ clusterId: 1
+ serviceRoleName: PrestoCoordinator
+ annotations:
+ # summary描述告警的概要信息
+ # description用于描述告警的详细信息。
+ summary: 重新启动
+ description: "{{ $labels.job }}的{{ $labels.instance }}实例产生告警"
+ - alert: PrestoWorker进程存活
+ expr: up{job="prestoworker"} != 1
+ for: 15s
+ labels:
+ # severity: 指定告警级别。有三种等级,分别为warning、critical和emergency。严重等级依次递增。
+ severity: exception
+ clusterId: 1
+ serviceRoleName: PrestoWorker
+ annotations:
+ # summary描述告警的概要信息
+ # description用于描述告警的详细信息。
+ summary: 重新启动
+ description: "{{ $labels.job }}的{{ $labels.instance }}实例产生告警"
+```
+重启prometheus,可以在UI上看到已经添加了告警
+
+![image](https://github.com/datavane/datasophon/assets/62798940/75709858-b641-425c-b87f-f838a5dea1fc)
diff --git "a/docs/zh/\344\270\272DataSophon\345\210\266\344\275\234dolphinscheduler-3.1.8\345\256\211\350\243\205\345\214\205.md" "b/docs/zh/\344\270\272DataSophon\345\210\266\344\275\234dolphinscheduler-3.1.8\345\256\211\350\243\205\345\214\205.md"
new file mode 100644
index 00000000..275f5a15
--- /dev/null
+++ "b/docs/zh/\344\270\272DataSophon\345\210\266\344\275\234dolphinscheduler-3.1.8\345\256\211\350\243\205\345\214\205.md"
@@ -0,0 +1,77 @@
+# 为DataSophon制作dolphinscheduler-3.1.8安装包
+
+### DataSophon修改datasophon-manager中conf/meta/DDP-1.2.0/DS/service_ddl.json,修改以下参数
+```
+"version": "3.1.8",
+"packageName": "dolphinscheduler-3.1.8.tar.gz",
+"decompressPackageName": "dolphinscheduler-3.1.8",
+```
+
+### 下载apache-dolphinscheduler-3.1.8-bin.tar.gz包,在服务器中解压缩
+```shell
+tar -xvf ./apache-dolphinscheduler-3.1.8-bin.tar.gz
+```
+### 修改文件名称,主要是要与上面decompressPackageName一致
+```shell
+mv apache-dolphinscheduler-3.1.8-bin dolphinscheduler-3.1.8
+```
+### 增加jmx文件夹
+```shell
+cp -r jmx dolphinscheduler-3.1.8
+```
+### 修改以下脚本的启动命令使jmx生效
+./dolphinscheduler-3.1.8/alert-server/bin/start.sh
+
+./dolphinscheduler-3.1.8/api-server/bin/start.sh
+
+./dolphinscheduler-3.1.8/master-server/bin/start.sh
+
+./dolphinscheduler-3.1.8/worker-server/bin/start.sh
+
+主要是JAVA_OPTS中添加了jmx内容
+```shell
+JAVA_OPTS=${JAVA_OPTS:-"-server -javaagent:$BIN_DIR/../../jmx/jmx_prometheus_javaagent-0.16.1.jar=12359:$BIN_DIR/../../jmx/prometheus_config.yml -Duser.timezone=${SPRING_JACKSON_TIME_ZONE} -Xms1g -Xmx1g -Xmn512m -XX:+PrintGCDetails -Xloggc:gc.log -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=dump.hprof"}
+```
+### jmx的端口号需要和service_ddl.json中的jmx端口号一致
+* api-server:12356
+* master-server:12357
+* worker-server:12358
+* alert-server:12359
+
+### 修改dolphinscheduler-3.1.8/bin/dolphinscheduler-daemon.sh脚本,在接近脚本最下方的 $state == "STOP" 判断处增加一行 exit 1
+```shell
+(status)
+ get_server_running_status
+ if [[ $state == "STOP" ]]; then
+ # font color - red
+ state="[ \033[1;31m $state \033[0m ]"
+ #增加一行,使得DataSophon执行脚本时可以有返回值判断状态
+ exit 1
+ else
+ # font color - green
+ state="[ \033[1;32m $state \033[0m ]"
+ fi
+ echo -e "$command $state"
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+```
+
+### 增加mysql依赖包,将mysql8依赖包放入到每个组件的lib中
+参考 DolphinScheduler 官方说明:https://github.com/apache/dolphinscheduler/blob/3.1.8-release/docs/docs/zh/guide/howto/datasource-setting.md
+
+### 打压缩包
+```shell
+tar -zcvf dolphinscheduler-3.1.8.tar.gz dolphinscheduler-3.1.8
+```
+### 生成md5文件
+```shell
+md5sum dolphinscheduler-3.1.8.tar.gz > dolphinscheduler-3.1.8.tar.gz.md5
+```
+### 将dolphinscheduler-3.1.8.tar.gz和dolphinscheduler-3.1.8.tar.gz.md5上传到DataSophon的安装包中
+```shell
+cp ./dolphinscheduler-3.1.8.tar.gz /opt/datasophon/DDP/packages/
+cp ./dolphinscheduler-3.1.8.tar.gz.md5 /opt/datasophon/DDP/packages/
+```
\ No newline at end of file
diff --git "a/docs/zh/\344\270\272DataSophon\345\210\266\344\275\234streampark-2.1.1\345\256\211\350\243\205\345\214\205.md" "b/docs/zh/\344\270\272DataSophon\345\210\266\344\275\234streampark-2.1.1\345\256\211\350\243\205\345\214\205.md"
new file mode 100644
index 00000000..4eb4f06e
--- /dev/null
+++ "b/docs/zh/\344\270\272DataSophon\345\210\266\344\275\234streampark-2.1.1\345\256\211\350\243\205\345\214\205.md"
@@ -0,0 +1,86 @@
+# 为DataSophon制作streampark-2.1.1安装包
+
+## 下载并解压streampark 2.1.1安装包
+```shell
+tar -xzvf apache-streampark_2.12-2.1.1-incubating-bin.tar.gz
+```
+
+## 修改安装包目录名称
+保持和service_ddl.json中 decompressPackageName 一致
+```shell
+mv apache-streampark_2.12-2.1.1-incubating-bin streampark-2.1.1
+```
+
+## 修改`streampark-2.1.1/bin/streampark.sh`文件
+
+- 在DEFAULT_OPTS(原249行)中增加prometheus_javaagent配置,如下:
+```shell
+DEFAULT_OPTS="""
+ -ea
+ -server
+ -javaagent:$APP_HOME/jmx/jmx_prometheus_javaagent-0.16.1.jar=10086:$APP_HOME/jmx/prometheus_config.yml
+ -Xms1024m
+ -Xmx1024m
+ -Xmn256m
+ -XX:NewSize=100m
+ -XX:+UseConcMarkSweepGC
+ -XX:CMSInitiatingOccupancyFraction=70
+ -XX:ThreadStackSize=512
+ -Xloggc:${APP_HOME}/logs/gc.log
+ """
+```
+
+- 在start函数中,`local workspace=...略`(原380行)下一行,增加 `mkdir -p $workspace`,如下
+```shell
+ local workspace=$(echo "$conf_streampark_workspace_local" | sed 's/#.*$//g')
+ mkdir -p $workspace
+ if [[ ! -d $workspace ]]; then
+ echo_r "ERROR: streampark.workspace.local: \"$workspace\" is invalid path, Please reconfigure in application.yml"
+ echo_r "NOTE: \"streampark.workspace.local\" Do not set under APP_HOME($APP_HOME). Set it to a secure directory outside of APP_HOME. "
+ exit 1;
+ fi
+ if [[ ! -w $workspace ]] || [[ ! -r $workspace ]]; then
+ echo_r "ERROR: streampark.workspace.local: \"$workspace\" Permission denied! "
+ exit 1;
+ fi
+```
+
+- 修改status函数(原582行)中增加`exit 1`,如下:
+```shell
+status() {
+ # shellcheck disable=SC2155
+ # shellcheck disable=SC2006
+ local PID=$(get_pid)
+ if [ $PID -eq 0 ]; then
+ echo_r "StreamPark is not running"
+ exit 1
+ else
+ echo_g "StreamPark is running pid is: $PID"
+ fi
+}
+```
+
+## 增加jmx文件夹
+```shell
+cp -r jmx streampark-2.1.1
+```
+
+## copy mysql8驱动包至lib目录
+(streampark从某个版本后把mysql驱动包移除了)
+```shell
+cp mysql-connector-java-8.0.28.jar streampark-2.1.1/lib/
+```
+
+## copy streampark安装包内的 mysql-schema.sql 和 mysql-data.sql 脚本出来备用
+```shell
+cp streampark-2.1.1/script/schema/mysql-schema.sql ./streampark_mysql-schema.sql
+cp streampark-2.1.1/script/data/mysql-data.sql ./streampark_mysql-data.sql
+```
+
+## 打压缩包并生成md5
+```shell
+tar -czf streampark-2.1.1.tar.gz streampark-2.1.1
+md5sum streampark-2.1.1.tar.gz | awk '{print $1}' >streampark-2.1.1.tar.gz.md5
+```
+
+
diff --git "a/docs/zh/\345\215\207\347\272\247flink1.15\345\210\260flink1.16.2.md" "b/docs/zh/\345\215\207\347\272\247flink1.15\345\210\260flink1.16.2.md"
new file mode 100644
index 00000000..5efe15e3
--- /dev/null
+++ "b/docs/zh/\345\215\207\347\272\247flink1.15\345\210\260flink1.16.2.md"
@@ -0,0 +1,200 @@
+### 1、构建压缩包
+下载flink官方包 flink-1.16.2-bin-scala_2.12.tgz
+```shell
+tar -zxvf flink-1.16.2-bin-scala_2.12.tgz
+
+# 默认支持hudi(注意:需在打tar包之前复制,否则该jar不会进入压缩包)
+cp ./hudi-flink1.16-bundle-0.13.0.jar ./flink-1.16.2/lib
+
+tar czf flink-1.16.2.tar.gz flink-1.16.2
+md5sum flink-1.16.2.tar.gz | awk '{print $1}' > flink-1.16.2.tar.gz.md5
+cp ./flink-1.16.2.tar.gz ./flink-1.16.2.tar.gz.md5 /opt/datasophon/DDP/packages/
+```
+### 2、修改service_ddl.json
+```shell
+vim /opt/apps/datasophon-manager-1.2.0/conf/meta/DDP-1.2.0/FLINK/service_ddl.json
+```
+```json
+{
+ "name": "FLINK",
+ "label": "Flink",
+ "description": "实时计算引擎",
+ "version": "1.16.2",
+ "sortNum": 6,
+ "dependencies":[],
+ "packageName": "flink-1.16.2.tar.gz",
+ "decompressPackageName": "flink-1.16.2",
+ "runAs":"root",
+ "roles": [
+ {
+ "name": "FlinkClient",
+ "label": "FlinkClient",
+ "roleType": "client",
+ "cardinality": "1+",
+ "logFile": "logs/flink.log"
+ }
+ ],
+ "configWriter": {
+ "generators": [
+ {
+ "filename": "flink-conf.yaml",
+ "configFormat": "custom",
+ "templateName": "properties3.ftl",
+ "outputDirectory": "conf",
+ "includeParams": [
+ "jobmanager.memory.heap.size",
+ "taskmanager.memory.flink.size",
+ "high-availability",
+ "high-availability.storageDir",
+ "high-availability.zookeeper.quorum",
+ "high-availability.zookeeper.client.acl",
+ "high-availability.zookeeper.path.root",
+ "custom.flink.conf.yaml",
+ "classloader.check-leaked-classloader"
+ ]
+ }
+ ]
+ },
+ "parameters": [
+ {
+ "name": "jobmanager.memory.heap.size",
+ "label": "jobmanager堆内存大小",
+ "description": "",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "1600m"
+ },
+ {
+ "name": "taskmanager.memory.flink.size",
+ "label": "taskmanager堆内存大小",
+ "description": "",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "1280m"
+ },
+ {
+ "name": "enableJMHA",
+ "label": "开启JobManager高可用",
+ "description": "",
+ "required": true,
+ "type": "switch",
+ "value": false,
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": false
+ },
+ {
+ "name": "high-availability",
+ "label": "使用zookeeper搭建高可用",
+ "description": "使用zookeeper搭建高可用",
+ "configWithHA": true,
+ "required": false,
+ "type": "input",
+ "value": "zookeeper",
+ "configurableInWizard": true,
+ "hidden": true,
+ "defaultValue": "zookeeper"
+ },{
+ "name": "high-availability.storageDir",
+ "label": "元数据存储HDFS目录",
+ "description": "存储JobManager的元数据到HDFS",
+ "configWithHA": true,
+ "required": false,
+ "type": "input",
+ "value": "hdfs://nameservice1/flink/ha/",
+ "configurableInWizard": true,
+ "hidden": true,
+ "defaultValue": "hdfs://nameservice1/flink/ha/"
+ },{
+ "name": "high-availability.zookeeper.quorum",
+ "label": "ZK集群地址",
+ "description": "配置ZK集群地址",
+ "configWithHA": true,
+ "required": false,
+ "type": "input",
+ "value": "${zkUrls}",
+ "configurableInWizard": true,
+ "hidden": true,
+ "defaultValue": ""
+ },
+ {
+ "name": "high-availability.zookeeper.path.root",
+ "label": "ZK元数据目录",
+ "description": "配置ZK元数据目录",
+ "configWithHA": true,
+ "required": false,
+ "type": "input",
+ "value": "/flink",
+ "configurableInWizard": true,
+ "hidden": true,
+ "defaultValue": "/flink"
+ },
+ {
+ "name": "high-availability.zookeeper.client.acl",
+ "label": "high-availability.zookeeper.client.acl",
+ "description": "默认是 open,如果zookeeper security启用了更改成creator",
+ "configWithHA": true,
+ "required": false,
+ "type": "input",
+ "value": "open",
+ "configurableInWizard": true,
+ "hidden": true,
+ "defaultValue": "open"
+ },
+ {
+ "name": "custom.flink.conf.yaml",
+ "label": "自定义配置flink-conf.yaml",
+ "description": "自定义配置",
+ "configType": "custom",
+ "required": false,
+ "type": "multipleWithKey",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "classloader.check-leaked-classloader",
+ "label": "禁用classloader.check",
+ "description": "禁用classloader.check",
+ "required": true,
+ "type": "switch",
+ "value": false,
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": false
+ }
+ ]
+}
+```
+### 3、修改环境变量
+```shell
+vim /etc/profile.d/datasophon-env.sh
+export FLINK_HOME=/opt/datasophon/flink-1.16.2
+export HADOOP_CLASSPATH=`hadoop classpath`
+source /etc/profile.d/datasophon-env.sh
+```
+各节点同样操作
+### 4、重启
+各节点worker重启
+```shell
+sh /opt/datasophon/datasophon-worker/bin/datasophon-worker.sh restart worker
+```
+主节点重启api
+```shell
+sh /opt/apps/datasophon-manager-1.2.0/bin/datasophon-api.sh restart api
+```
+### 5、测试
+```shell
+flink run -d -t yarn-per-job $FLINK_HOME/examples/streaming/WordCount.jar
+```
+```shell
+flink run-application -t yarn-application $FLINK_HOME/examples/streaming/TopSpeedWindowing.jar
+```
diff --git "a/docs/zh/\345\215\207\347\272\247spark3.1\345\210\2603.2.2.md" "b/docs/zh/\345\215\207\347\272\247spark3.1\345\210\2603.2.2.md"
new file mode 100644
index 00000000..aaf4c767
--- /dev/null
+++ "b/docs/zh/\345\215\207\347\272\247spark3.1\345\210\2603.2.2.md"
@@ -0,0 +1,147 @@
+### 1、构建压缩包
+下载官方包 spark-3.2.2-bin-hadoop3.2.tgz
+```shell
+tar -zxvf spark-3.2.2-bin-hadoop3.2.tgz
+mv spark-3.2.2-bin-hadoop3.2 spark-3.2.2
+
+# 默认集成hudi
+cp ./hudi-spark3.2-bundle_2.12-0.13.0.jar ./spark-3.2.2/jars/
+chown hadoop:hadoop ./spark-3.2.2/jars/hudi-spark3.2-bundle_2.12-0.13.0.jar
+
+tar czf spark-3.2.2.tar.gz spark-3.2.2
+md5sum spark-3.2.2.tar.gz | awk '{print $1}' > spark-3.2.2.tar.gz.md5
+cp ./spark-3.2.2.tar.gz ./spark-3.2.2.tar.gz.md5 /opt/datasophon/DDP/packages/
+```
+### 2、修改service_ddl.json
+```json
+{
+ "name": "SPARK3",
+ "label": "Spark3",
+ "description": "分布式计算系统",
+ "version": "3.2.2",
+ "sortNum": 7,
+ "dependencies":[],
+ "packageName": "spark-3.2.2.tar.gz",
+ "decompressPackageName": "spark-3.2.2",
+ "roles": [
+ {
+ "name": "SparkClient3",
+ "label": "SparkClient3",
+ "roleType": "client",
+ "cardinality": "1+",
+ "logFile": "logs/hadoop-${user}-datanode-${host}.log"
+ }
+ ],
+ "configWriter": {
+ "generators": [
+ {
+ "filename": "spark-env.sh",
+ "configFormat": "custom",
+ "templateName": "spark-env.ftl",
+ "outputDirectory": "conf",
+ "includeParams": [
+ "SPARK_DIST_CLASSPATH",
+ "HADOOP_CONF_DIR",
+ "YARN_CONF_DIR",
+ "custom.spark.env.sh"
+ ]
+ },
+ {
+ "filename": "spark-defaults.conf",
+ "configFormat": "properties2",
+ "outputDirectory": "conf",
+ "includeParams": [
+ "custom.spark.defaults.conf"
+ ]
+ }
+ ]
+ },
+ "parameters": [
+ {
+ "name": "SPARK_DIST_CLASSPATH",
+ "label": "spark加载Classpath路径",
+ "description": "",
+ "required": true,
+ "configType": "map",
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "$(${HADOOP_HOME}/bin/hadoop classpath)"
+ },
+ {
+ "name": "HADOOP_CONF_DIR",
+ "label": "Hadoop配置文件目录",
+ "description": "",
+ "configType": "map",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "${HADOOP_HOME}/etc/hadoop"
+ },{
+ "name": "YARN_CONF_DIR",
+ "label": "Yarn配置文件目录",
+ "description": "",
+ "configType": "map",
+ "required": true,
+ "type": "input",
+ "value": "",
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": "${HADOOP_HOME}/etc/hadoop"
+ },
+ {
+ "name": "custom.spark.env.sh",
+ "label": "自定义配置spark-env.sh",
+ "description": "自定义配置spark-env.sh",
+ "configType": "custom",
+ "required": false,
+ "type": "multipleWithKey",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ },
+ {
+ "name": "custom.spark.defaults.conf",
+ "label": "自定义配置spark-defaults.conf",
+ "description": "自定义配置",
+ "configType": "custom",
+ "required": false,
+ "type": "multipleWithKey",
+ "value": [],
+ "configurableInWizard": true,
+ "hidden": false,
+ "defaultValue": ""
+ }
+ ]
+}
+```
+### 3、修改环境变量
+```shell
+vim /etc/profile.d/datasophon-env.sh
+export SPARK_HOME=/opt/datasophon/spark-3.2.2
+```
+各节点分发
+### 4、重启
+各节点worker重启
+```shell
+sh /opt/datasophon/datasophon-worker/bin/datasophon-worker.sh restart worker
+```
+主节点重启api
+```shell
+sh /opt/apps/datasophon-manager-1.2.0/bin/datasophon-api.sh restart api
+```
+### 5、测试
+单机:
+```shell
+sh /opt/datasophon/spark-3.2.2/bin/spark-submit --class org.apache.spark.examples.SparkPi /opt/datasophon/spark-3.2.2/examples/jars/spark-examples_2.12-3.2.2.jar 12
+```
+yarn:
+```shell
+su - hdfs
+sh /opt/datasophon/spark-3.2.2/bin/spark-submit --master yarn --deploy-mode client --class org.apache.spark.examples.SparkPi /opt/datasophon/spark-3.2.2/examples/jars/spark-examples_2.12-3.2.2.jar 12
+```
diff --git a/pom.xml b/pom.xml
index 510f2bc9..60594d43 100644
--- a/pom.xml
+++ b/pom.xml
@@ -37,7 +37,7 @@
- 1.1.3
+ 1.2.0
2.4.20
3.0.1
2.6
diff --git a/website/static/img/weixing.png b/website/static/img/weixing.png
index 3c3c45a2..6dd22ca1 100644
Binary files a/website/static/img/weixing.png and b/website/static/img/weixing.png differ