From 4acb357632edf3deffb640fc7e38cd48c0f862d6 Mon Sep 17 00:00:00 2001
From: emma
Date: Fri, 7 Apr 2023 15:49:05 +0200
Subject: [PATCH 01/39] add ConfVersions parameter to store multiple configs

---
 cluster/cluster.go       | 105 ++++++++++++++++++++++++++++++++++++++-
 cluster/cluster_has.go   |  57 +++++++++++++++++++++
 config/config.go         |  20 ++++++++
 server/server.go         |  92 +++++++++++++++++++++++++++++++---
 server/server_monitor.go |  20 ++++++++
 5 files changed, 286 insertions(+), 8 deletions(-)

diff --git a/cluster/cluster.go b/cluster/cluster.go
index a736dbd85..239288d92 100644
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -15,8 +15,10 @@ import (
 	"io/ioutil"
 	"os"
 	"os/exec"
+	"reflect"
 	"strings"
 	"sync"
+	t "text/template"
 	"time"
 
 	"github.com/BurntSushi/toml"
@@ -88,6 +90,7 @@ type Cluster struct {
 	IsNotMonitoring bool                `json:"isNotMonitoring"`
 	IsCapturing     bool                `json:"isCapturing"`
 	Conf            config.Config       `json:"config"`
+	Confs           *config.ConfVersion `json:"-"`
 	CleanAll        bool                `json:"cleanReplication"` //used in testing
 	Topology        string              `json:"topology"`
 	Uptime          string              `json:"uptime"`
@@ -271,7 +274,9 @@ const (
 )
 
 // Init initial cluster definition
-func (cluster *Cluster) Init(conf config.Config, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error {
+func (cluster *Cluster) Init(confs *config.ConfVersion, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error {
+	cluster.Confs = confs
+	conf := confs.ConfInit
 	cluster.SqlErrorLog = logsql.New()
 	cluster.SqlGeneralLog = logsql.New()
 	cluster.crcTable = crc64.MakeTable(crc64.ECMA) // http://golang.org/pkg/hash/crc64/#pkg-constants
@@ -679,6 +684,7 @@ func (cluster *Cluster) Stop() {
 
 }
 
+/*
 func (cluster *Cluster) Save() error {
 
 	type Save struct {
@@ -726,6 +732,103 @@ func (cluster *Cluster) Save() error {
 		}
 	}
 
+	return nil
+}*/
+
+func (cluster *Cluster) Save() error {
+
+	type Save struct {
+		Servers    string      `json:"servers"`
+		Crashes    crashList   `json:"crashes"`
+		SLA        state.Sla   `json:"sla"`
+		SLAHistory []state.Sla `json:"slaHistory"`
+		IsAllDbUp  bool        `json:"provisioned"`
+	}
+
+	var clsave Save
+	clsave.Crashes = cluster.Crashes
+	clsave.Servers = cluster.Conf.Hosts
+	clsave.SLA = cluster.sme.GetSla()
+	clsave.IsAllDbUp = cluster.IsAllDbUp
+	clsave.SLAHistory = cluster.SLAHistory
+
+	saveJson, _ := json.MarshalIndent(clsave, "", "\t")
+	err := ioutil.WriteFile(cluster.Conf.WorkingDir+"/"+cluster.Name+"/clusterstate.json", saveJson, 0644)
+	if err != nil {
+		return err
+	}
+
+	saveQeueryRules, _ := json.MarshalIndent(cluster.QueryRules, "", "\t")
+	err = ioutil.WriteFile(cluster.Conf.WorkingDir+"/"+cluster.Name+"/queryrules.json", saveQeueryRules, 0644)
+	if err != nil {
+		return err
+	}
+	if cluster.Conf.ConfRewrite {
+		var myconf = make(map[string]config.Config)
+
+		myconf["saved-"+cluster.Name] = cluster.Conf
+
+		file, err := os.OpenFile(cluster.Conf.WorkingDir+"/"+cluster.Name+"/config.toml", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+		if err != nil {
+			if os.IsPermission(err) {
+				cluster.LogPrintf(LvlInfo, "File permission denied: %s", cluster.Conf.WorkingDir+"/"+cluster.Name+"/config.toml")
+			}
+			return err
+		}
+		defer file.Close()
+
+		values := reflect.ValueOf(myconf["saved-"+cluster.Name])
+		types := values.Type()
+		s := ""
+		ss := ""
+		file.WriteString("[saved-" + cluster.Name + "]\n")
+		for i := 0; i < values.NumField(); i++ {
+			if values.Field(i).String() != "" {
+				if types.Field(i).Type.String() == "string" {
+					s = " " + types.Field(i).Name + " = \"" + values.Field(i).String() + "\"\n"
+				}
+				if types.Field(i).Type.String() == "bool" || types.Field(i).Type.String() == "int" || types.Field(i).Type.String() == "uint64" || types.Field(i).Type.String() == "int64" {
+					s = " " + types.Field(i).Name + " = "
+					ss = format(" {{.}} \n", values.Field(i))
+				}
+				file.WriteString(s)
+				file.WriteString(ss)
+				ss = ""
+			}
+		}
+
+	}
+
+	return nil
+}
+
+func format(s string, v interface{}) string {
+	c, b := new(t.Template), new(strings.Builder)
+	t.Must(c.Parse(s)).Execute(b, v)
+	return b.String()
+}
+
+func (cluster *Cluster) Overwrite() error {
+
+	if cluster.Conf.ConfRewrite {
+		var myconf = make(map[string]config.Config)
+
+		myconf["overwrite-"+cluster.Name] = cluster.Conf
+
+		file, err := os.OpenFile(cluster.Conf.WorkingDir+"/"+cluster.Name+"/overwrite.toml", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+		if err != nil {
+			if os.IsPermission(err) {
+				cluster.LogPrintf(LvlInfo, "File permission denied: %s", cluster.Conf.WorkingDir+"/"+cluster.Name+"/overwrite.toml")
+			}
+			return err
+		}
+		defer file.Close()
+		err = toml.NewEncoder(file).Encode(myconf)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go
index 8d1f2a5fd..b6fac6a54 100644
--- a/cluster/cluster_has.go
+++ b/cluster/cluster_has.go
@@ -8,6 +8,8 @@ package cluster
 
 import (
 	"fmt"
+	"log"
+	"reflect"
 	"strings"
 
 	"github.com/signal18/replication-manager/config"
@@ -360,3 +362,58 @@ func (cluster *Cluster) HasMonitoringCredentialsRotation() bool {
 	}
 	return false
 }
+
+func (cluster *Cluster) IsVariableDiffFromRepmanDefault(v string) bool {
+	values_clust := reflect.ValueOf(cluster.Conf)
+	types_clust := values_clust.Type()
+
+	values_def := reflect.ValueOf(cluster.Confs.ConfInit)
+	types_def := values_def.Type()
+
+	var val_clust reflect.Value
+	var val_def reflect.Value
+
+	for i := 0; i < values_clust.NumField(); i++ {
+		if types_clust.Field(i).Name == v {
+			val_clust = values_clust.Field(i)
+		}
+		if types_def.Field(i).Name == v {
+			val_def = values_def.Field(i)
+		}
+
+	}
+
+	return val_clust == val_def
+}
+
+func (cluster *Cluster) IsVariableImmutable(v string) bool {
+	values := reflect.ValueOf(cluster.Conf)
+	types := values.Type()
+
+	values_flag := reflect.ValueOf(cluster.Confs.ConfFlag)
+	//types_flag := values_flag.Type()
+
+	for i := 0; i < values.NumField(); i++ {
+		if types.Field(i).Name == v {
+			if types.Field(i).Type.String() == "string" {
+				if values.Field(i).String() != "" && values.Field(i).String() != values_flag.Field(i).String() {
+					log.Printf("TESTE IMMUTABLE val : %s (string)", values.Field(i).String())
+					return true
+				}
+			}
+			if types.Field(i).Type.String() == "bool" {
+				if values.Field(i).String() != "true" {
+					log.Printf("TESTE IMMUTABLE val : %s (bool)", values.Field(i).String())
+					return true
+				}
+
+			}
+			if types.Field(i).Type.String() == "int" || types.Field(i).Type.String() == "uint64" || types.Field(i).Type.String() == "int64" {
+
+			}
+		}
+
+	}
+
+	return false
+}
diff --git a/config/config.go b/config/config.go
index 0e416de8a..10ada3239 100644
--- a/config/config.go
+++ b/config/config.go
@@ -16,6 +16,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
+	"reflect"
 	"strconv"
 	"strings"
 	"time"
@@ -585,6 +586,14 @@ type MyDumperMetaData struct {
 	EndTimestamp   time.Time `json:"start_timestamp" db:"start_timestamp"`
 }
 
+type ConfVersion struct {
+	ConfDynamic   Config `json:"-"`
+	ConfFlag      Config `json:"-"`
+	ConfImmutable Config `json:"-"`
+	ConfInit      Config `json:"-"`
+	ConfDecode    Config `json:"-"`
+}
+
 const (
 	ConstStreamingSubDir string = "backups"
 )
@@ -1047,3 +1056,14 @@ func (conf *Config) GetTarballUrl(name string) (string, error) {
 	}
 	return "", errors.New("tarball not found in collection")
 }
+
+func (conf *Config) GetStringValue(name string) string {
+	values := reflect.ValueOf(conf)
+	types := values.Type()
+	for i := 0; i < values.NumField(); i++ {
+		if types.Field(i).Name == name {
+			return values.Field(i).String()
+		}
+	}
+	return ""
+}
diff --git a/server/server.go b/server/server.go
index 928d51334..43d0c6c58 100644
--- a/server/server.go
+++ b/server/server.go
@@ -64,6 +64,8 @@ type ReplicationManager struct {
 	ClusterList          []string                    `json:"clusters"`
 	Tests                []string                    `json:"tests"`
 	Conf                 config.Config               `json:"config"`
+	ConfFlag             config.Config               `json:"-"`
+	ConfigPathList       []string                    `json:"-"`
 	Logs                 s18log.HttpLog              `json:"logs"`
 	ServicePlans         []config.ServicePlan        `json:"servicePlans"`
 	ServiceOrchestrators []config.ConfigVariableType `json:"serviceOrchestrators"`
@@ -83,7 +85,7 @@ type ReplicationManager struct {
 	exit                 bool
 	isStarted            bool
 	Confs                map[string]config.Config
-	ForcedConfs          map[string]config.Config
+	VersionConfs         map[string]*config.ConfVersion
 	grpcServer           *grpc.Server               `json:"-"`
 	grpcWrapped          *grpcweb.WrappedGrpcServer `json:"-"`
 	V3Up                 chan bool                  `json:"-"`
@@ -239,13 +241,33 @@ func (repman *ReplicationManager) initEmbed() error {
 }
 
 func (repman *ReplicationManager) InitConfig(conf config.Config) {
-	repman.ForcedConfs = make(map[string]config.Config)
+	repman.VersionConfs = make(map[string]*config.ConfVersion)
 	// call after init if configuration file is provide
+
+	//if repman is embed, create folders and load missing embedded files
 	if conf.WithEmbed == "ON" {
 		repman.initEmbed()
 	}
+
+	//init viper to read config file .toml
 	fistRead := viper.GetViper()
 	fistRead.SetConfigType("toml")
+
+	var test config.Config
+
+	secRead := viper.GetViper()
+	secRead.SetConfigType("toml")
+
+	var repman_default config.Config
+	fistRead.Unmarshal(repman_default)
+
+	repman.ConfFlag = repman_default
+
+	//fmt.Printf("%+v\n", fistRead)
+	fistRead.Debug()
+	fmt.Printf("%+v\n", fistRead.AllSettings())
+
+	//if a config file is already define
 	if conf.ConfigFile != "" {
 		if _, err := os.Stat(conf.ConfigFile); os.IsNotExist(err) {
 			// log.Fatal("No config file " + conf.ConfigFile)
@@ -254,16 +276,20 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) {
 		fistRead.SetConfigFile(conf.ConfigFile)
 	} else {
+		//adds config files by searching them in different folders
 		fistRead.SetConfigName("config")
 		fistRead.AddConfigPath("/etc/replication-manager/")
 		fistRead.AddConfigPath(".")
 		fistRead.AddConfigPath("./.replication-manager")
+
+		//if tarball, add config path
 		if conf.WithTarball == "ON" {
 			fistRead.AddConfigPath("/usr/local/replication-manager/etc")
 			if _, err := os.Stat("/usr/local/replication-manager/etc/config.toml"); os.IsNotExist(err) {
 				log.Warning("No config file /usr/local/replication-manager/etc/config.toml")
 			}
 		}
+		//if embed, add config path
 		if conf.WithEmbed == "ON" {
 			if _, err := os.Stat("./.replication-manager/config.toml"); os.IsNotExist(err) {
 				log.Warning("No config file ./.replication-manager/config.toml ")
@@ -274,8 +300,11 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) {
 			}
 		}
 	}
+	//default path for cluster config
 	conf.ClusterConfigPath = conf.WorkingDir + "/cluster.d"
+
+	//search for default section in config file and read
+	//setEnvPrefix is case insensitive
fistRead.SetEnvPrefix("DEFAULT") err := fistRead.ReadInConfig() if err == nil { @@ -283,29 +312,41 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { "file": fistRead.ConfigFileUsed(), }).Debug("Using config file") } else { - // if _, ok := err.(fistRead.ConfigParseError); ok { //log.WithError(err).Fatal("Could not parse config file") log.Errorf("Could not parse config file: %s", err) } - // Proceed include files + //recup tous les param set dans le default + secRead = fistRead.Sub("default") + secRead.UnmarshalKey("default", test) + fmt.Printf("%+v\n", secRead) + + //from here first read as the combination of default sections variables but not forced parameters + //fmt.Printf("%+v\n", fistRead) + // Proceed include files + //if include is defined in a config file if fistRead.GetString("default.include") != "" { log.Info("Reading default section include directory: " + fistRead.GetString("default.include")) if _, err := os.Stat(fistRead.GetString("default.include")); os.IsNotExist(err) { log.Warning("Include config directory does not exist " + conf.Include) } else { + //if this path exist, set cluster config path to it conf.ClusterConfigPath = fistRead.GetString("default.include") } + //load files from the include path files, err := ioutil.ReadDir(conf.ClusterConfigPath) if err != nil { log.Infof("No config include directory %s ", conf.ClusterConfigPath) } + //read and set config from all files in the include path for _, f := range files { if !f.IsDir() && strings.HasSuffix(f.Name(), ".toml") { + file_name := strings.Split(f.Name(), ".") + cluster_name := file_name[0] fistRead.SetConfigName(f.Name()) fistRead.SetConfigFile(conf.ClusterConfigPath + "/" + f.Name()) // viper.Debug() @@ -316,6 +357,13 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { if err != nil { log.Fatal("Config error in " + conf.ClusterConfigPath + "/" + f.Name() + ":" + err.Error()) } + + //recup tous les param set dans le include + fmt.Printf("%+v\n", secRead.AllSettings()) + secRead = fistRead.Sub(cluster_name) + secRead.UnmarshalKey(cluster_name, test) + fmt.Printf("%+v\n", secRead.AllSettings()) + fmt.Printf("KEY : %s", secRead.AllKeys()) } } } else { @@ -323,15 +371,19 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { } // Proceed dynamic config - if fistRead.GetBool("default.monitoring-save-config") { + //read working dir from config if fistRead.GetString("default.monitoring-datadir") != "" { conf.WorkingDir = fistRead.GetString("default.monitoring-datadir") } + + //load files from the working dir files, err := ioutil.ReadDir(conf.WorkingDir) if err != nil { log.Infof("No working directory %s ", conf.WorkingDir) } + + //read and set config from all files in the working dir for _, f := range files { if f.IsDir() && f.Name() != "graphite" { fistRead.SetConfigName(f.Name()) @@ -352,9 +404,11 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { log.Warning("No monitoring-save-config variable in default section config change lost on restart") } + //contain a list of cluster name var strClusters string strClusters = cfgGroup + //if cluster name is empty, go discover cluster if strClusters == "" { // Discovering the clusters from all merged conf files build clusterDiscovery map strClusters = repman.DiscoverClusters(fistRead) @@ -362,12 +416,16 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { } cfgGroupIndex = 0 + //extract the default section of the config files cf1 := fistRead.Sub("Default") + //init viper to save the config 
vipersave := viper.GetViper() + //cf1.Debug() if cf1 == nil { log.Warning("config.toml has no [Default] configuration group and config group has not been specified") } else { + //save all default section in conf cf1.AutomaticEnv() cf1.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_")) cf1.SetEnvPrefix("DEFAULT") @@ -380,8 +438,12 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { repman.Conf = conf } // backupvipersave := viper.GetViper() + + //if clusters have been discovered if strClusters != "" { + //set cluster list repman.ClusterList = strings.Split(strClusters, ",") + //add config from cluster to the config map for _, cluster := range repman.ClusterList { //vipersave := backupvipersave @@ -394,17 +456,20 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { log.WithField("cluster", repman.ClusterList[cfgGroupIndex]).Debug("Default Cluster set") } else { + //add default to the clusterlist if no cluster discover repman.ClusterList = append(repman.ClusterList, "Default") log.WithField("cluster", repman.ClusterList[cfgGroupIndex]).Debug("Default Cluster set") confs["Default"] = conf } + fmt.Printf("%+v\n", fistRead.AllSettings()) repman.Confs = confs //repman.Conf = conf } func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, cluster string, conf config.Config) config.Config { + confs := new(config.ConfVersion) clusterconf := conf //vipersave := viper.GetViper() @@ -441,8 +506,11 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, cluste // fmt.Printf("include config for cluster %s %+v\n", cluster, clusterconf) } + confs.ConfImmutable = clusterconf + confs.ConfFlag = repman.ConfFlag + + fmt.Printf("%+v\n", cf2.AllSettings()) - repman.ForcedConfs[cluster] = clusterconf if clusterconf.ConfRewrite { cf3 := fistRead.Sub("saved-" + cluster) if cf3 == nil { @@ -454,7 +522,11 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, cluste // vipersave.MergeConfigMap(cf3.AllSettings()) // vipersave.Unmarshal(&clusterconf) } + confs.ConfDynamic = clusterconf } + confs.ConfInit = clusterconf + + repman.VersionConfs[cluster] = confs } return clusterconf } @@ -719,7 +791,8 @@ func (repman *ReplicationManager) StartCluster(clusterName string) (*cluster.Clu myClusterConf.ShareDir = myClusterConf.BaseDir + "/share" myClusterConf.WorkingDir = myClusterConf.BaseDir + "/data" } - repman.currentCluster.Init(myClusterConf, clusterName, &repman.tlog, &repman.Logs, repman.termlength, repman.UUID, repman.Version, repman.Hostname, k) + repman.VersionConfs[clusterName].ConfInit = myClusterConf + repman.currentCluster.Init(repman.VersionConfs[clusterName], clusterName, &repman.tlog, &repman.Logs, repman.termlength, repman.UUID, repman.Version, repman.Hostname, k) repman.Clusters[clusterName] = repman.currentCluster repman.currentCluster.SetCertificate(repman.OpenSVC) go repman.currentCluster.Run() @@ -946,3 +1019,8 @@ func (repman *ReplicationManager) InitGrants() error { sort.Sort(GrantSorter(repman.ServiceAcl)) return nil } + +func IsDefault(p string, v *viper.Viper) bool { + + return false +} diff --git a/server/server_monitor.go b/server/server_monitor.go index 753bde8b5..263e72000 100644 --- a/server/server_monitor.go +++ b/server/server_monitor.go @@ -14,6 +14,7 @@ package server import ( "bytes" + "fmt" "hash/crc64" "io/ioutil" "runtime" @@ -550,11 +551,30 @@ func init() { } } + //cobra.OnInitialize() initLogFlags(monitorCmd) + a, _ := monitorCmd.Flags().GetBool("test") + fmt.Printf("COUCOU %t : %t", 
monitorCmd.Flags().Lookup("test").Changed, a) viper.BindPFlags(monitorCmd.Flags()) + a, _ = monitorCmd.Flags().GetBool("test") + + fmt.Printf("COUCOU %t : %t", monitorCmd.Flags().Lookup("test").Changed, a) + /* + var test config.Config + + values := reflect.ValueOf(test) + types := values.Type() + for i := 0; i < values.NumField(); i++ { + fmt.Printf("HAS CHANGED FROM DEFAULT : %s\n", types.Field(i).Name) + if monitorCmd.Flags().Lookup(types.Field(i).Name).Changed { + + fmt.Printf("HAS CHANGED FROM DEFAULT : %s, %s\n", types.Field(i).Name, values.Field(i).String()) + } + }*/ + } func initDeprecated() { From 3bade8dff55992d81e24d9c4f79ce840083b393d Mon Sep 17 00:00:00 2001 From: emma Date: Tue, 11 Apr 2023 08:56:12 +0200 Subject: [PATCH 02/39] confFlag does not take into account command line parameters --- certgen.go | 2 ++ cluster/cluster_has.go | 7 +++++-- config/config.go | 20 ++++++++++++++++++++ server/server.go | 19 ++++++++++++------- server/server_monitor.go | 16 +++++++++++++--- 5 files changed, 52 insertions(+), 12 deletions(-) diff --git a/certgen.go b/certgen.go index b0d7b3263..408eda07d 100644 --- a/certgen.go +++ b/certgen.go @@ -7,6 +7,7 @@ package main import ( "flag" + "fmt" "log" "os" "time" @@ -25,6 +26,7 @@ var ( ) func main() { + fmt.Printf("COUCOU") flag.Parse() if len(*host) == 0 { diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index b6fac6a54..4b2b83977 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -402,14 +402,17 @@ func (cluster *Cluster) IsVariableImmutable(v string) bool { } } if types.Field(i).Type.String() == "bool" { - if values.Field(i).String() != "true" { + if values.Field(i).String() != values_flag.Field(i).String() { log.Printf("TESTE IMMUTABLE val : %s (bool)", values.Field(i).String()) return true } } if types.Field(i).Type.String() == "int" || types.Field(i).Type.String() == "uint64" || types.Field(i).Type.String() == "int64" { - + if values.Field(i).String() != values_flag.Field(i).String() { + log.Printf("TESTE IMMUTABLE val : %s (int)", values.Field(i).String()) + return true + } } } diff --git a/config/config.go b/config/config.go index 10ada3239..f17695c39 100644 --- a/config/config.go +++ b/config/config.go @@ -15,6 +15,7 @@ import ( "errors" "fmt" "io/ioutil" + "log" "os" "reflect" "strconv" @@ -1067,3 +1068,22 @@ func (conf *Config) GetStringValue(name string) string { } return "" } + +func (conf Config) PrintConf() { + values := reflect.ValueOf(conf) + types := values.Type() + log.Printf("PRINT CONF") + for i := 0; i < values.NumField(); i++ { + + if types.Field(i).Type.String() == "string" { + fmt.Printf("%s : %s (string)\n", types.Field(i).Name, values.Field(i).String()) + } + if types.Field(i).Type.String() == "bool" { + fmt.Printf("%s : %t (bool)\n", types.Field(i).Name, values.Field(i)) + } + if types.Field(i).Type.String() == "int" || types.Field(i).Type.String() == "uint64" || types.Field(i).Type.String() == "int64" { + fmt.Printf("%s : %d (int)\n", types.Field(i).Name, values.Field(i)) + } + + } +} diff --git a/server/server.go b/server/server.go index 43d0c6c58..0ff05109b 100644 --- a/server/server.go +++ b/server/server.go @@ -258,14 +258,17 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { secRead := viper.GetViper() secRead.SetConfigType("toml") - var repman_default config.Config - fistRead.Unmarshal(repman_default) + //var repman_default config.Config + //fistRead.Unmarshal(&repman_default) - repman.ConfFlag = repman_default + //repman.ConfFlag = repman_default 
//fmt.Printf("%+v\n", fistRead) - fistRead.Debug() - fmt.Printf("%+v\n", fistRead.AllSettings()) + //fistRead.Debug() + fmt.Printf("REPMAN DEFAULT FLAG: ") + //fmt.Printf("%+v\n", fistRead.AllSettings()) + fmt.Printf("%+v\n", repman.ConfFlag) + repman.ConfFlag.PrintConf() //if a config file is already define if conf.ConfigFile != "" { @@ -319,7 +322,9 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //recup tous les param set dans le default secRead = fistRead.Sub("default") - secRead.UnmarshalKey("default", test) + secRead.UnmarshalKey("default", &test) + + fmt.Printf("REPMAN DEFAULT SECTION : ") fmt.Printf("%+v\n", secRead) //from here first read as the combination of default sections variables but not forced parameters @@ -361,7 +366,7 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //recup tous les param set dans le include fmt.Printf("%+v\n", secRead.AllSettings()) secRead = fistRead.Sub(cluster_name) - secRead.UnmarshalKey(cluster_name, test) + secRead.UnmarshalKey(cluster_name, &test) fmt.Printf("%+v\n", secRead.AllSettings()) fmt.Printf("KEY : %s", secRead.AllKeys()) } diff --git a/server/server_monitor.go b/server/server_monitor.go index 263e72000..7f27cdafd 100644 --- a/server/server_monitor.go +++ b/server/server_monitor.go @@ -24,12 +24,15 @@ import ( "github.com/go-sql-driver/mysql" "github.com/signal18/replication-manager/cluster" + "github.com/signal18/replication-manager/config" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" ) +var repman_default config.Config + func init() { conf.ProvOrchestrator = "local" @@ -557,11 +560,16 @@ func init() { a, _ := monitorCmd.Flags().GetBool("test") fmt.Printf("COUCOU %t : %t", monitorCmd.Flags().Lookup("test").Changed, a) - viper.BindPFlags(monitorCmd.Flags()) + //conf des defaults flag sans les paramètres en ligne de commande + //var repman_default config.Config - a, _ = monitorCmd.Flags().GetBool("test") + v := viper.GetViper() + v.SetConfigType("toml") + v.Unmarshal(&repman_default) + //repman_default.PrintConf() + + viper.BindPFlags(monitorCmd.Flags()) - fmt.Printf("COUCOU %t : %t", monitorCmd.Flags().Lookup("test").Changed, a) /* var test config.Config @@ -677,6 +685,8 @@ For interacting with this daemon use, Run: func(cmd *cobra.Command, args []string) { RepMan = new(ReplicationManager) + + RepMan.ConfFlag = repman_default RepMan.InitConfig(conf) RepMan.Run() }, From b5a9a1a8d1dae660f2d861912425920de1aafe73 Mon Sep 17 00:00:00 2001 From: emma Date: Tue, 11 Apr 2023 09:38:25 +0200 Subject: [PATCH 03/39] a --- server/server.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/server/server.go b/server/server.go index 0ff05109b..4909723bd 100644 --- a/server/server.go +++ b/server/server.go @@ -260,14 +260,10 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //var repman_default config.Config //fistRead.Unmarshal(&repman_default) - //repman.ConfFlag = repman_default - //fmt.Printf("%+v\n", fistRead) - //fistRead.Debug() fmt.Printf("REPMAN DEFAULT FLAG: ") - //fmt.Printf("%+v\n", fistRead.AllSettings()) - fmt.Printf("%+v\n", repman.ConfFlag) + repman.ConfFlag.PrintConf() //if a config file is already define @@ -325,10 +321,9 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { secRead.UnmarshalKey("default", &test) fmt.Printf("REPMAN DEFAULT SECTION : ") - fmt.Printf("%+v\n", secRead) + test.PrintConf() //from here first read as the combination of default sections variables but not forced 
parameters - //fmt.Printf("%+v\n", fistRead) // Proceed include files //if include is defined in a config file From 8344dff79c6321a6a5b05dac919649d5cbfbeea1 Mon Sep 17 00:00:00 2001 From: emma Date: Tue, 11 Apr 2023 09:40:28 +0200 Subject: [PATCH 04/39] add missing files --- etc/local/embed/config.toml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 etc/local/embed/config.toml diff --git a/etc/local/embed/config.toml b/etc/local/embed/config.toml new file mode 100644 index 000000000..8b6712b15 --- /dev/null +++ b/etc/local/embed/config.toml @@ -0,0 +1,11 @@ +[default] +monitoring-sharedir="./.replication-manager/share" +monitoring-datadir="./.replication-manager/data" +monitorinf-basedir="./" +prov-orchestrator = "onpremise" +[mycluster] +db-servers-hosts="localhost:3306" +db-servers-credential="root:" + + +test=false \ No newline at end of file From 20147279c3afbd5046f7e0ba9d103cca35860796 Mon Sep 17 00:00:00 2001 From: emma Date: Tue, 11 Apr 2023 17:17:15 +0200 Subject: [PATCH 05/39] add map for immuable flag --- cluster/cluster.go | 9 +- cluster/cluster_has.go | 37 ++------- server/server.go | 173 ++++++++++++++++++++------------------- server/server_monitor.go | 16 +++- 4 files changed, 114 insertions(+), 121 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 239288d92..e49541ecb 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -91,6 +91,7 @@ type Cluster struct { IsCapturing bool `json:"isCapturing"` Conf config.Config `json:"config"` Confs *config.ConfVersion `json:"-"` + ImmuableMap map[string]interface{} `json:"-"` CleanAll bool `json:"cleanReplication"` //used in testing Topology string `json:"topology"` Uptime string `json:"uptime"` @@ -274,8 +275,10 @@ const ( ) // Init initial cluster definition -func (cluster *Cluster) Init(confs *config.ConfVersion, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error { +func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface{}, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error { cluster.Confs = confs + cluster.ImmuableMap = imm + conf := confs.ConfInit cluster.SqlErrorLog = logsql.New() cluster.SqlGeneralLog = logsql.New() @@ -337,6 +340,10 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, cfgGroup string, tlog *s cluster.LogPushover = log.New() + cluster.LogPrintf(LvlErr, "TEST is immuable test : %t", cluster.IsVariableImmutable("test")) + cluster.LogPrintf(LvlErr, "TEST is immuable replication-credential: %t", cluster.IsVariableImmutable("replication-credential")) + cluster.LogPrintf(LvlErr, "TEST immuable map : %s", cluster.ImmuableMap) + if cluster.Conf.PushoverAppToken != "" && cluster.Conf.PushoverUserToken != "" { cluster.LogPushover.AddHook( pushover.NewHook(cluster.Conf.PushoverAppToken, cluster.Conf.PushoverUserToken), diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 4b2b83977..56c03c7e2 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -8,7 +8,6 @@ package cluster import ( "fmt" - "log" "reflect" "strings" @@ -387,36 +386,10 @@ func (cluster *Cluster) IsVariableDiffFromRepmanDefault(v string) bool { } func (cluster *Cluster) IsVariableImmutable(v string) bool { - values := reflect.ValueOf(cluster.Conf) - types := values.Type() - - values_flag := reflect.ValueOf(cluster.Confs.ConfFlag) - //types_flag 
:= values_flag.Type() - - for i := 0; i < values.NumField(); i++ { - if types.Field(i).Name == v { - if types.Field(i).Type.String() == "string" { - if values.Field(i).String() != "" && values.Field(i).String() != values_flag.Field(i).String() { - log.Printf("TESTE IMMUTABLE val : %s (string)", values.Field(i).String()) - return true - } - } - if types.Field(i).Type.String() == "bool" { - if values.Field(i).String() != values_flag.Field(i).String() { - log.Printf("TESTE IMMUTABLE val : %s (bool)", values.Field(i).String()) - return true - } - - } - if types.Field(i).Type.String() == "int" || types.Field(i).Type.String() == "uint64" || types.Field(i).Type.String() == "int64" { - if values.Field(i).String() != values_flag.Field(i).String() { - log.Printf("TESTE IMMUTABLE val : %s (int)", values.Field(i).String()) - return true - } - } - } - + _, ok := cluster.ImmuableMap[v] + if !ok { + return false } - - return false + //cluster.LogPrintf(LvlErr, "TEST immuable : %t", ok) + return true } diff --git a/server/server.go b/server/server.go index 4909723bd..0e0c05943 100644 --- a/server/server.go +++ b/server/server.go @@ -49,36 +49,38 @@ var RepMan *ReplicationManager // Global variables type ReplicationManager struct { - OpenSVC opensvc.Collector `json:"-"` - Version string `json:"version"` - Fullversion string `json:"fullVersion"` - Os string `json:"os"` - Arch string `json:"arch"` - MemProfile string `json:"memprofile"` - Clusters map[string]*cluster.Cluster `json:"-"` - Agents []opensvc.Host `json:"agents"` - UUID string `json:"uuid"` - Hostname string `json:"hostname"` - Status string `json:"status"` - SplitBrain bool `json:"spitBrain"` - ClusterList []string `json:"clusters"` - Tests []string `json:"tests"` - Conf config.Config `json:"config"` - ConfFlag config.Config `json:"-"` - ConfigPathList []string `json:"-"` - Logs s18log.HttpLog `json:"logs"` - ServicePlans []config.ServicePlan `json:"servicePlans"` - ServiceOrchestrators []config.ConfigVariableType `json:"serviceOrchestrators"` - ServiceAcl []config.Grant `json:"serviceAcl"` - ServiceRepos []config.DockerRepo `json:"serviceRepos"` - ServiceTarballs []config.Tarball `json:"serviceTarballs"` - ServiceFS map[string]bool `json:"serviceFS"` - ServiceVM map[string]bool `json:"serviceVM"` - ServiceDisk map[string]string `json:"serviceDisk"` - ServicePool map[string]bool `json:"servicePool"` - BackupLogicalList map[string]bool `json:"backupLogicalList"` - BackupPhysicalList map[string]bool `json:"backupPhysicalList"` - currentCluster *cluster.Cluster `json:"-"` + OpenSVC opensvc.Collector `json:"-"` + Version string `json:"version"` + Fullversion string `json:"fullVersion"` + Os string `json:"os"` + Arch string `json:"arch"` + MemProfile string `json:"memprofile"` + Clusters map[string]*cluster.Cluster `json:"-"` + Agents []opensvc.Host `json:"agents"` + UUID string `json:"uuid"` + Hostname string `json:"hostname"` + Status string `json:"status"` + SplitBrain bool `json:"spitBrain"` + ClusterList []string `json:"clusters"` + Tests []string `json:"tests"` + Conf config.Config `json:"config"` + ConfFlag config.Config `json:"-"` + ImmuableMaps map[string]map[string]interface{} `json:"-"` + CommandLineFlag []string `json:"-"` + ConfigPathList []string `json:"-"` + Logs s18log.HttpLog `json:"logs"` + ServicePlans []config.ServicePlan `json:"servicePlans"` + ServiceOrchestrators []config.ConfigVariableType `json:"serviceOrchestrators"` + ServiceAcl []config.Grant `json:"serviceAcl"` + ServiceRepos []config.DockerRepo `json:"serviceRepos"` + 
ServiceTarballs []config.Tarball `json:"serviceTarballs"` + ServiceFS map[string]bool `json:"serviceFS"` + ServiceVM map[string]bool `json:"serviceVM"` + ServiceDisk map[string]string `json:"serviceDisk"` + ServicePool map[string]bool `json:"servicePool"` + BackupLogicalList map[string]bool `json:"backupLogicalList"` + BackupPhysicalList map[string]bool `json:"backupPhysicalList"` + currentCluster *cluster.Cluster `json:"-"` tlog s18log.TermLog termlength int exitMsg string @@ -242,6 +244,8 @@ func (repman *ReplicationManager) initEmbed() error { func (repman *ReplicationManager) InitConfig(conf config.Config) { repman.VersionConfs = make(map[string]*config.ConfVersion) + repman.ImmuableMaps = make(map[string]map[string]interface{}) + ImmuableMap := make(map[string]interface{}) // call after init if configuration file is provide //if repman is embed, create folders and load missing embedded files @@ -253,18 +257,9 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { fistRead := viper.GetViper() fistRead.SetConfigType("toml") - var test config.Config - - secRead := viper.GetViper() - secRead.SetConfigType("toml") - - //var repman_default config.Config - //fistRead.Unmarshal(&repman_default) - //repman.ConfFlag = repman_default - - fmt.Printf("REPMAN DEFAULT FLAG: ") - - repman.ConfFlag.PrintConf() + //fmt.Printf("REPMAN DEFAULT FLAG: ") + //ConfFlag is already set in server_monitor to get all default value flag (without being overwrited by command line flag) + //repman.ConfFlag.PrintConf() //if a config file is already define if conf.ConfigFile != "" { @@ -311,17 +306,26 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { "file": fistRead.ConfigFileUsed(), }).Debug("Using config file") } else { - // if _, ok := err.(fistRead.ConfigParseError); ok { - //log.WithError(err).Fatal("Could not parse config file") log.Errorf("Could not parse config file: %s", err) } - //recup tous les param set dans le default - secRead = fistRead.Sub("default") - secRead.UnmarshalKey("default", &test) + //recup tous les param set dans le default (avec les lignes de commandes) + //err = fistRead.MergeInConfig() + if err != nil { + log.Fatal("Config error in " + conf.ClusterConfigPath + ":" + err.Error()) + } + secRead := fistRead.Sub("DEFAULT") + //var test config.Config + //secRead.UnmarshalKey("default", &test) + + fmt.Printf("REPMAN DEFAULT SECTION : %s", secRead.AllSettings()) + //Add immuatable flag from default section + for _, f := range secRead.AllKeys() { + ImmuableMap[f] = secRead.Get(f) + } + fmt.Printf("REPMAN DEFAULT SECTION : %s", ImmuableMap) - fmt.Printf("REPMAN DEFAULT SECTION : ") - test.PrintConf() + //test.PrintConf() //from here first read as the combination of default sections variables but not forced parameters @@ -345,8 +349,8 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //read and set config from all files in the include path for _, f := range files { if !f.IsDir() && strings.HasSuffix(f.Name(), ".toml") { - file_name := strings.Split(f.Name(), ".") - cluster_name := file_name[0] + //file_name := strings.Split(f.Name(), ".") + //cluster_name := file_name[0] fistRead.SetConfigName(f.Name()) fistRead.SetConfigFile(conf.ClusterConfigPath + "/" + f.Name()) // viper.Debug() @@ -359,11 +363,8 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { } //recup tous les param set dans le include - fmt.Printf("%+v\n", secRead.AllSettings()) - secRead = fistRead.Sub(cluster_name) - secRead.UnmarshalKey(cluster_name, &test) - 
fmt.Printf("%+v\n", secRead.AllSettings()) - fmt.Printf("KEY : %s", secRead.AllKeys()) + //secRead = fistRead.Sub(cluster_name) + //secRead.UnmarshalKey(cluster_name, &test) } } } else { @@ -435,6 +436,7 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { vipersave.Unmarshal(&conf) // fmt.Printf("%+v\n", conf) //os.Exit(3) + //conf.PrintConf() repman.Conf = conf } // backupvipersave := viper.GetViper() @@ -447,7 +449,7 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { for _, cluster := range repman.ClusterList { //vipersave := backupvipersave - confs[cluster] = repman.GetClusterConfig(fistRead, cluster, conf) + confs[cluster] = repman.GetClusterConfig(fistRead, ImmuableMap, cluster, conf) cfgGroupIndex++ } @@ -463,34 +465,38 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { confs["Default"] = conf } - fmt.Printf("%+v\n", fistRead.AllSettings()) + + //fmt.Printf("%+v\n", fistRead.AllSettings()) repman.Confs = confs //repman.Conf = conf } -func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, cluster string, conf config.Config) config.Config { +func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, ImmuableMap map[string]interface{}, cluster string, conf config.Config) config.Config { confs := new(config.ConfVersion) + //Add immuatable flag from command line + for _, f := range repman.CommandLineFlag { + ImmuableMap[f] = fistRead.Get(f) + } + + //set the default config clusterconf := conf - //vipersave := viper.GetViper() + + //conf.PrintConf() + //fmt.Printf("%+v\n", fistRead.AllSettings()) + + //if name cluster is defined if cluster != "" { log.WithField("group", cluster).Debug("Reading configuration group") - def := fistRead.Sub("Default") - // def.Debug() - def.AutomaticEnv() - def.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_")) - def.SetEnvPrefix("DEFAULT") - if def != nil { - repman.initAlias(def) - def.Unmarshal(&clusterconf) - - } - //fmt.Printf("default for cluster %s %+v\n", cluster, clusterconf) + //extract the cluster config from the viper cf2 := fistRead.Sub(cluster) + //fmt.Printf("%+v\n", cf2.AllSettings()) - //def.SetEnvPrefix(strings.ToUpper(cluster)) - // + //Add immuatable flag from cluster section + for _, f := range cf2.AllKeys() { + ImmuableMap[f] = cf2.Get(f) + } if cf2 == nil { log.WithField("group", cluster).Infof("Could not parse configuration group") @@ -498,29 +504,28 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, cluste cf2.AutomaticEnv() cf2.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_")) repman.initAlias(cf2) - // cf2.Unmarshal(&def) cf2.Unmarshal(&clusterconf) - // fmt.Printf("include config cf2 for cluster %s %+v\n", cluster, clusterconf) - // vipersave.MergeConfigMap(cf2.AllSettings()) - // vipersave.Unmarshal(&clusterconf) - // fmt.Printf("include config for cluster %s %+v\n", cluster, clusterconf) - } + //clusterconf.PrintConf() + + //save the immuable map for the cluster + //fmt.Printf("Immuatable map : %s\n", ImmuableMap) + repman.ImmuableMaps[cluster] = ImmuableMap + + //store default cluster config in immutable config (all parameter set in default and cluster section, default value and command line) confs.ConfImmutable = clusterconf confs.ConfFlag = repman.ConfFlag - fmt.Printf("%+v\n", cf2.AllSettings()) + //fmt.Printf("%+v\n", cf2.AllSettings()) + //if dynamic config, load modified parameter from the saved config if clusterconf.ConfRewrite { cf3 := fistRead.Sub("saved-" + cluster) if cf3 == nil { 
log.WithField("group", cluster).Info("Could not parse saved configuration group") } else { repman.initAlias(cf3) - cf3.Unmarshal(&def) cf3.Unmarshal(&clusterconf) - // vipersave.MergeConfigMap(cf3.AllSettings()) - // vipersave.Unmarshal(&clusterconf) } confs.ConfDynamic = clusterconf } @@ -792,7 +797,7 @@ func (repman *ReplicationManager) StartCluster(clusterName string) (*cluster.Clu myClusterConf.WorkingDir = myClusterConf.BaseDir + "/data" } repman.VersionConfs[clusterName].ConfInit = myClusterConf - repman.currentCluster.Init(repman.VersionConfs[clusterName], clusterName, &repman.tlog, &repman.Logs, repman.termlength, repman.UUID, repman.Version, repman.Hostname, k) + repman.currentCluster.Init(repman.VersionConfs[clusterName], repman.ImmuableMaps[clusterName], clusterName, &repman.tlog, &repman.Logs, repman.termlength, repman.UUID, repman.Version, repman.Hostname, k) repman.Clusters[clusterName] = repman.currentCluster repman.currentCluster.SetCertificate(repman.OpenSVC) go repman.currentCluster.Run() diff --git a/server/server_monitor.go b/server/server_monitor.go index 7f27cdafd..5336bbbcc 100644 --- a/server/server_monitor.go +++ b/server/server_monitor.go @@ -14,7 +14,6 @@ package server import ( "bytes" - "fmt" "hash/crc64" "io/ioutil" "runtime" @@ -557,8 +556,6 @@ func init() { //cobra.OnInitialize() initLogFlags(monitorCmd) - a, _ := monitorCmd.Flags().GetBool("test") - fmt.Printf("COUCOU %t : %t", monitorCmd.Flags().Lookup("test").Changed, a) //conf des defaults flag sans les paramètres en ligne de commande //var repman_default config.Config @@ -685,7 +682,7 @@ For interacting with this daemon use, Run: func(cmd *cobra.Command, args []string) { RepMan = new(ReplicationManager) - + RepMan.CommandLineFlag = GetCommandLineFlag(cmd) RepMan.ConfFlag = repman_default RepMan.InitConfig(conf) RepMan.Run() @@ -695,3 +692,14 @@ For interacting with this daemon use, RepMan.Stop() }, } + +func GetCommandLineFlag(cmd *cobra.Command) []string { + var cmd_flag []string + flag := viper.AllKeys() + for _, f := range flag { + if cmd.Flags().Changed(f) { + cmd_flag = append(cmd_flag, f) + } + } + return cmd_flag +} From d45edf853893c5f30c61920daa1091efce4c92b0 Mon Sep 17 00:00:00 2001 From: emma Date: Wed, 12 Apr 2023 13:51:51 +0200 Subject: [PATCH 06/39] modification of save func and add overwrite func --- cluster/cluster.go | 86 +++++++++++++++++++++++++++++------------- cluster/cluster_add.go | 43 +++++++++++++++++++++ cluster/cluster_has.go | 8 +--- server/api_cluster.go | 17 ++++++++- server/server.go | 76 +++++++++++++++++++++++++++++-------- 5 files changed, 180 insertions(+), 50 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index e49541ecb..bb5b99ca0 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -21,7 +21,6 @@ import ( t "text/template" "time" - "github.com/BurntSushi/toml" "github.com/bluele/logrus_slack" "github.com/signal18/replication-manager/cluster/configurator" "github.com/signal18/replication-manager/cluster/nbc" @@ -91,7 +90,8 @@ type Cluster struct { IsCapturing bool `json:"isCapturing"` Conf config.Config `json:"config"` Confs *config.ConfVersion `json:"-"` - ImmuableMap map[string]interface{} `json:"-"` + ImmuableFlagMap map[string]interface{} `json:"-"` + DynamicFlagMap map[string]interface{} `json:"-"` CleanAll bool `json:"cleanReplication"` //used in testing Topology string `json:"topology"` Uptime string `json:"uptime"` @@ -275,10 +275,10 @@ const ( ) // Init initial cluster definition -func (cluster *Cluster) Init(confs 
*config.ConfVersion, imm map[string]interface{}, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error { +func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface{}, dyn map[string]interface{}, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error { cluster.Confs = confs - cluster.ImmuableMap = imm - + cluster.ImmuableFlagMap = imm + cluster.DynamicFlagMap = dyn conf := confs.ConfInit cluster.SqlErrorLog = logsql.New() cluster.SqlGeneralLog = logsql.New() @@ -340,9 +340,8 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface cluster.LogPushover = log.New() - cluster.LogPrintf(LvlErr, "TEST is immuable test : %t", cluster.IsVariableImmutable("test")) - cluster.LogPrintf(LvlErr, "TEST is immuable replication-credential: %t", cluster.IsVariableImmutable("replication-credential")) - cluster.LogPrintf(LvlErr, "TEST immuable map : %s", cluster.ImmuableMap) + //fmt.Printf("TEST immuable map : %s", cluster.ImmuableFlagMap) + //fmt.Printf("TEST is immuable test : %t", cluster.IsVariableImmutable("test")) if cluster.Conf.PushoverAppToken != "" && cluster.Conf.PushoverUserToken != "" { cluster.LogPushover.AddHook( @@ -784,26 +783,46 @@ func (cluster *Cluster) Save() error { } defer file.Close() - values := reflect.ValueOf(myconf["saved-"+cluster.Name]) - types := values.Type() + /* + values := reflect.ValueOf(myconf["saved-"+cluster.Name]) + types := values.Type() + s := "" + ss := "" + file.WriteString("[saved-" + cluster.Name + "]\n") + for i := 0; i < values.NumField(); i++ { + _, ok := cluster.ImmuableFlagMap[types.Name()] + if values.Field(i).String() != "" || !ok { + if types.Field(i).Type.String() == "string" { + s = " " + types.Field(i).Name + " = \"" + values.Field(i).String() + "\"\n" + } + if types.Field(i).Type.String() == "bool" || types.Field(i).Type.String() == "int" || types.Field(i).Type.String() == "uint64" || types.Field(i).Type.String() == "int64" { + s = " " + types.Field(i).Name + " = " + ss = format(" {{.}} \n", values.Field(i)) + } + file.WriteString(s) + file.WriteString(ss) + ss = "" + } + }*/ s := "" ss := "" file.WriteString("[saved-" + cluster.Name + "]\n") - for i := 0; i < values.NumField(); i++ { - if values.Field(i).String() != "" { - if types.Field(i).Type.String() == "string" { - s = " " + types.Field(i).Name + " = \"" + values.Field(i).String() + "\"\n" - } - if types.Field(i).Type.String() == "bool" || types.Field(i).Type.String() == "int" || types.Field(i).Type.String() == "uint64" || types.Field(i).Type.String() == "int64" { - s = " " + types.Field(i).Name + " = " - ss = format(" {{.}} \n", values.Field(i)) - } - file.WriteString(s) - file.WriteString(ss) - ss = "" + for tag := range cluster.DynamicFlagMap { + s = " " + tag + " = " + fmt.Printf("SAVE : %s", tag) + if reflect.TypeOf(cluster.DynamicFlagMap[tag]).String() == "string" { + s += "'" + ss = format("{{.}}", cluster.DynamicFlagMap[tag]) + "'\n" + } else { + ss = format(" {{.}} \n", cluster.DynamicFlagMap[tag]) } + file.WriteString(s) + file.WriteString(ss) + } + err = cluster.Overwrite() + if err != nil { + cluster.LogPrintf(LvlInfo, "Error during Overwriting: %s", err) } - } return nil @@ -830,9 +849,24 @@ func (cluster *Cluster) Overwrite() error { return err } defer file.Close() - err = toml.NewEncoder(file).Encode(myconf) - if err != nil { - 
return err + s := "" + ss := "" + file.WriteString("[overwrite-" + cluster.Name + "]\n") + for tag := range cluster.ImmuableFlagMap { + _, ok := cluster.DynamicFlagMap[tag] + if ok { + s = " " + tag + " = " + if reflect.TypeOf(cluster.DynamicFlagMap[tag]).String() == "string" { + s += "'" + ss = format("{{.}}", cluster.DynamicFlagMap[tag]) + "'\n" + } else { + ss = format(" {{.}} \n", cluster.DynamicFlagMap[tag]) + } + + file.WriteString(s) + file.WriteString(ss) + } + } } diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index 1c0f3dd74..90937b05d 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -7,6 +7,8 @@ package cluster import ( + "fmt" + "strconv" "strings" "sync" @@ -36,6 +38,7 @@ func (cluster *Cluster) AddDBTagConfig(tag string) { cluster.Configurator.AddDBTag(tag) cluster.Conf.ProvTags = cluster.Configurator.GetConfigDBTags() cluster.SetClusterCredentialsFromConfig() + cluster.AddDBTagDynamicFlagMap(tag) } } @@ -63,6 +66,46 @@ func (cluster *Cluster) AddDBTag(tag string) { } +func (cluster *Cluster) AddDBTagDynamicFlagMap(tag string) { + //to add dynamic tag in the cluster dynamic flag map + v, ok := cluster.DynamicFlagMap["prov-db-tags"] + if ok { + str := fmt.Sprintf("%v", v) + if !strings.Contains(str, tag) { + str += "," + tag + cluster.DynamicFlagMap["prov-db-tags"] = str + } + } else { + v, ok := cluster.ImmuableFlagMap["prov-db-tags"] + if ok { + imm_tag := fmt.Sprintf("%v", v) + cluster.DynamicFlagMap["prov-db-tags"] = imm_tag + "," + tag + } else { + cluster.DynamicFlagMap["prov-db-tags"] = tag + } + + } +} + +func (cluster *Cluster) AddDynamicFlagMap(name string, val string) { + //to add dynamic setting in the cluster dynamic flag map + if name == "failover-max-slave-delay" { + cluster.DynamicFlagMap[name], _ = strconv.ParseInt(val, 10, 64) + } else if name == "failover-limit" { + cluster.DynamicFlagMap[name], _ = strconv.Atoi(val) + } else if val == "bool" { + b, ok := cluster.ImmuableFlagMap[name] + if ok { + cluster.DynamicFlagMap[name] = !b.(bool) + } else { + cluster.DynamicFlagMap[name] = true + } + + } else { + cluster.DynamicFlagMap[name] = val + } +} + func (cluster *Cluster) AddProxyTag(tag string) { cluster.Configurator.AddProxyTag(tag) cluster.Conf.ProvProxTags = cluster.Configurator.GetConfigProxyTags() diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 56c03c7e2..c11ae97cb 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -386,10 +386,6 @@ func (cluster *Cluster) IsVariableDiffFromRepmanDefault(v string) bool { } func (cluster *Cluster) IsVariableImmutable(v string) bool { - _, ok := cluster.ImmuableMap[v] - if !ok { - return false - } - //cluster.LogPrintf(LvlErr, "TEST immuable : %t", ok) - return true + _, ok := cluster.ImmuableFlagMap[v] + return ok } diff --git a/server/api_cluster.go b/server/api_cluster.go index aa1d54764..b7d29401d 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -1088,6 +1088,9 @@ func (repman *ReplicationManager) switchSettings(mycluster *cluster.Cluster, set case "monitoring-processlist": mycluster.SwitchMonitoringProcesslist() } + if mycluster.IsVariableDiffFromRepmanDefault(setting) { + mycluster.AddDynamicFlagMap(setting, "bool") + } } func (repman *ReplicationManager) handlerMuxSetSettings(w http.ResponseWriter, r *http.Request) { @@ -1100,8 +1103,15 @@ func (repman *ReplicationManager) handlerMuxSetSettings(w http.ResponseWriter, r return } setting := vars["settingName"] - mycluster.LogPrintf("INFO", "API receive set setting %s", setting) - 
repman.setSetting(mycluster, setting, vars["settingValue"]) + //not immuable + if !mycluster.IsVariableImmutable(setting) { + mycluster.LogPrintf("INFO", "API receive set setting %s", setting) + repman.setSetting(mycluster, setting, vars["settingValue"]) + } else { + mycluster.LogPrintf(cluster.LvlErr, "Can not overwrite immuable parameter defined in config , please use config-merge command to preserve them between restart") + mycluster.LogPrintf("INFO", "API receive set setting %s", setting) + repman.setSetting(mycluster, setting, vars["settingValue"]) + } } else { http.Error(w, "No cluster", 500) return @@ -1234,6 +1244,9 @@ func (repman *ReplicationManager) setSetting(mycluster *cluster.Cluster, name st case "backup-binlogs-keep": mycluster.SetBackupBinlogsKeep(value) } + if mycluster.IsVariableDiffFromRepmanDefault(name) { + mycluster.AddDynamicFlagMap(name, value) + } } func (repman *ReplicationManager) handlerMuxAddTag(w http.ResponseWriter, r *http.Request) { diff --git a/server/server.go b/server/server.go index 0e0c05943..6628057c0 100644 --- a/server/server.go +++ b/server/server.go @@ -65,7 +65,8 @@ type ReplicationManager struct { Tests []string `json:"tests"` Conf config.Config `json:"config"` ConfFlag config.Config `json:"-"` - ImmuableMaps map[string]map[string]interface{} `json:"-"` + ImmuableFlagMaps map[string]map[string]interface{} `json:"-"` + DynamicFlagMaps map[string]map[string]interface{} `json:"-"` CommandLineFlag []string `json:"-"` ConfigPathList []string `json:"-"` Logs s18log.HttpLog `json:"logs"` @@ -244,8 +245,10 @@ func (repman *ReplicationManager) initEmbed() error { func (repman *ReplicationManager) InitConfig(conf config.Config) { repman.VersionConfs = make(map[string]*config.ConfVersion) - repman.ImmuableMaps = make(map[string]map[string]interface{}) + repman.ImmuableFlagMaps = make(map[string]map[string]interface{}) + repman.DynamicFlagMaps = make(map[string]map[string]interface{}) ImmuableMap := make(map[string]interface{}) + DynamicMap := make(map[string]interface{}) // call after init if configuration file is provide //if repman is embed, create folders and load missing embedded files @@ -318,12 +321,12 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //var test config.Config //secRead.UnmarshalKey("default", &test) - fmt.Printf("REPMAN DEFAULT SECTION : %s", secRead.AllSettings()) + //fmt.Printf("REPMAN DEFAULT SECTION : %s", secRead.AllSettings()) + //Add immuatable flag from default section for _, f := range secRead.AllKeys() { ImmuableMap[f] = secRead.Get(f) } - fmt.Printf("REPMAN DEFAULT SECTION : %s", ImmuableMap) //test.PrintConf() @@ -378,6 +381,9 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { conf.WorkingDir = fistRead.GetString("default.monitoring-datadir") } + dynRead := viper.GetViper() + dynRead.SetConfigType("toml") + //load files from the working dir files, err := ioutil.ReadDir(conf.WorkingDir) if err != nil { @@ -387,19 +393,24 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //read and set config from all files in the working dir for _, f := range files { if f.IsDir() && f.Name() != "graphite" { - fistRead.SetConfigName(f.Name()) + //fistRead.SetConfigName(f.Name()) + dynRead.SetConfigName(f.Name()) if _, err := os.Stat(conf.WorkingDir + "/" + f.Name() + "/config.toml"); os.IsNotExist(err) { log.Warning("No monitoring saved config found " + conf.WorkingDir + "/" + f.Name() + "/config.toml") } else { log.Infof("Parsing saved config from working directory %s ", 
conf.WorkingDir+"/"+f.Name()+"/config.toml") fistRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/config.toml") + dynRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/config.toml") err := fistRead.MergeInConfig() + err = dynRead.MergeInConfig() if err != nil { log.Fatal("Config error in " + conf.WorkingDir + "/" + f.Name() + "/config.toml" + ":" + err.Error()) } } } } + //fmt.Printf("%+v\n", dynRead.AllSettings()) + //fmt.Printf("%s\n", dynRead.AllKeys()) } else { log.Warning("No monitoring-save-config variable in default section config change lost on restart") @@ -449,7 +460,7 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { for _, cluster := range repman.ClusterList { //vipersave := backupvipersave - confs[cluster] = repman.GetClusterConfig(fistRead, ImmuableMap, cluster, conf) + confs[cluster] = repman.GetClusterConfig(fistRead, ImmuableMap, DynamicMap, cluster, conf) cfgGroupIndex++ } @@ -471,12 +482,28 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //repman.Conf = conf } -func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, ImmuableMap map[string]interface{}, cluster string, conf config.Config) config.Config { +func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, ImmuableMap map[string]interface{}, DynamicMap map[string]interface{}, cluster string, conf config.Config) config.Config { confs := new(config.ConfVersion) + clustImmuableMap := make(map[string]interface{}) + clustDynamicMap := make(map[string]interface{}) + + //to copy default immuable flag in the immuable flag cluster map + for k, v := range ImmuableMap { + clustImmuableMap[k] = v + } + + //to copy default dynamic flag in the dynamic flag cluster map + for k, v := range DynamicMap { + clustDynamicMap[k] = v + } //Add immuatable flag from command line for _, f := range repman.CommandLineFlag { - ImmuableMap[f] = fistRead.Get(f) + v := fistRead.Get(f) + if v != nil { + clustImmuableMap[f] = v + } + } //set the default config @@ -491,12 +518,6 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab //extract the cluster config from the viper cf2 := fistRead.Sub(cluster) - //fmt.Printf("%+v\n", cf2.AllSettings()) - - //Add immuatable flag from cluster section - for _, f := range cf2.AllKeys() { - ImmuableMap[f] = cf2.Get(f) - } if cf2 == nil { log.WithField("group", cluster).Infof("Could not parse configuration group") @@ -506,11 +527,17 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab repman.initAlias(cf2) cf2.Unmarshal(&clusterconf) } + + //Add immuatable flag from cluster section + for _, f := range cf2.AllKeys() { + clustImmuableMap[f] = cf2.Get(f) + } + //clusterconf.PrintConf() //save the immuable map for the cluster //fmt.Printf("Immuatable map : %s\n", ImmuableMap) - repman.ImmuableMaps[cluster] = ImmuableMap + repman.ImmuableFlagMaps[cluster] = clustImmuableMap //store default cluster config in immutable config (all parameter set in default and cluster section, default value and command line) confs.ConfImmutable = clusterconf @@ -528,7 +555,24 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab cf3.Unmarshal(&clusterconf) } confs.ConfDynamic = clusterconf + //to add flag in cluster dynamic map only if not defined yet or if the flag value read is diff from immuable flag value + for _, f := range cf2.AllKeys() { + v := cf2.Get(f) + if v != nil { + imm_v, ok := clustImmuableMap[f] + if ok && imm_v != v { + clustDynamicMap[f] = v + 
} + if !ok { + clustDynamicMap[f] = v + } + + } + + } } + fmt.Printf("GET CLUST CONF Dynamic map : %s\n", clustDynamicMap) + repman.DynamicFlagMaps[cluster] = clustDynamicMap confs.ConfInit = clusterconf repman.VersionConfs[cluster] = confs @@ -797,7 +841,7 @@ func (repman *ReplicationManager) StartCluster(clusterName string) (*cluster.Clu myClusterConf.WorkingDir = myClusterConf.BaseDir + "/data" } repman.VersionConfs[clusterName].ConfInit = myClusterConf - repman.currentCluster.Init(repman.VersionConfs[clusterName], repman.ImmuableMaps[clusterName], clusterName, &repman.tlog, &repman.Logs, repman.termlength, repman.UUID, repman.Version, repman.Hostname, k) + repman.currentCluster.Init(repman.VersionConfs[clusterName], repman.ImmuableFlagMaps[clusterName], repman.DynamicFlagMaps[clusterName], clusterName, &repman.tlog, &repman.Logs, repman.termlength, repman.UUID, repman.Version, repman.Hostname, k) repman.Clusters[clusterName] = repman.currentCluster repman.currentCluster.SetCertificate(repman.OpenSVC) go repman.currentCluster.Run() From 0cfdd82a96da6892a404cd824f1a2680afae15d1 Mon Sep 17 00:00:00 2001 From: emma Date: Thu, 13 Apr 2023 09:21:48 +0200 Subject: [PATCH 07/39] add flag from api switch setting in dynamic flag map --- cluster/cluster_add.go | 3 ++- config/config.go | 11 +++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index 90937b05d..3078a0073 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -98,7 +98,8 @@ func (cluster *Cluster) AddDynamicFlagMap(name string, val string) { if ok { cluster.DynamicFlagMap[name] = !b.(bool) } else { - cluster.DynamicFlagMap[name] = true + + cluster.DynamicFlagMap[name] = !cluster.Confs.ConfFlag.GetBool(name) } } else { diff --git a/config/config.go b/config/config.go index f17695c39..373364fa8 100644 --- a/config/config.go +++ b/config/config.go @@ -1069,6 +1069,17 @@ func (conf *Config) GetStringValue(name string) string { return "" } +func (conf *Config) GetBool(name string) bool { + values := reflect.ValueOf(conf) + types := values.Type() + for i := 0; i < values.NumField(); i++ { + if types.Field(i).Name == name { + return values.Field(i).Bool() + } + } + return false +} + func (conf Config) PrintConf() { values := reflect.ValueOf(conf) types := values.Type() From 4514490fa31dd4e0a247ac5caebef47f2252cef9 Mon Sep 17 00:00:00 2001 From: emma Date: Thu, 13 Apr 2023 20:39:03 +0200 Subject: [PATCH 08/39] fix pushover alerting --- cluster/cluster.go | 6 +++-- cluster/cluster_log.go | 30 +++++++++++++++++-------- utils/logrus/hooks/pushover/pushover.go | 19 +++++++++++++--- 3 files changed, 41 insertions(+), 14 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index bb5b99ca0..5ad31c448 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -337,8 +337,8 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface if _, err := os.Stat(cluster.WorkingDir); os.IsNotExist(err) { os.MkdirAll(cluster.Conf.WorkingDir+"/"+cluster.Name, os.ModePerm) } - cluster.LogPushover = log.New() + cluster.LogPushover.SetFormatter(&log.TextFormatter{FullTimestamp: true}) //fmt.Printf("TEST immuable map : %s", cluster.ImmuableFlagMap) //fmt.Printf("TEST is immuable test : %t", cluster.IsVariableImmutable("test")) @@ -351,6 +351,7 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface } cluster.LogSlack = log.New() + cluster.LogSlack.SetFormatter(&log.TextFormatter{FullTimestamp: true}) if 
cluster.Conf.SlackURL != "" { cluster.LogSlack.AddHook(&logrus_slack.SlackHook{ @@ -362,7 +363,8 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface Timeout: 5 * time.Second, // request timeout for calling slack api }) } - cluster.LogPrintf("ALERT", "Replication manager init cluster version : %s", cluster.Conf.Version) + cluster.LogPrintf("START", "Replication manager started with version: %s", cluster.Conf.Version) + if cluster.Conf.MailTo != "" { msg := "Replication manager init cluster version : " + cluster.Conf.Version subj := "Replication-Manager version" diff --git a/cluster/cluster_log.go b/cluster/cluster_log.go index 878a96c34..ab124d91d 100644 --- a/cluster/cluster_log.go +++ b/cluster/cluster_log.go @@ -129,6 +129,7 @@ func (cluster *Cluster) LogUpdate(line int, level string, format string, args .. } func (cluster *Cluster) LogPrintf(level string, format string, args ...interface{}) int { + //fmt.Printf("CLUSTER LOGPRINTF %s :"+format, level, args) line := 0 stamp := fmt.Sprint(time.Now().Format("2006/01/02 15:04:05")) padright := func(str, pad string, lenght int) string { @@ -173,7 +174,7 @@ func (cluster *Cluster) LogPrintf(level string, format string, args ...interface case "ERROR": log.WithField("cluster", cluster.Name).Errorf(cliformat, args...) if cluster.Conf.SlackURL != "" { - cluster.LogSlack.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert"}).Errorf(cliformat, args...) + cluster.LogSlack.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert", "channel": "Slack"}).Errorf(cliformat, args...) } if cluster.Conf.TeamsUrl != "" { go cluster.sendMsTeams(level, format, args) @@ -185,22 +186,33 @@ func (cluster *Cluster) LogPrintf(level string, format string, args ...interface case "WARN": log.WithField("cluster", cluster.Name).Warnf(cliformat, args...) if cluster.Conf.SlackURL != "" { - cluster.LogSlack.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert"}).Errorf(cliformat, args...) + cluster.LogSlack.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert", "channel": "Slack"}).Errorf(cliformat, args...) } if cluster.Conf.TeamsUrl != "" { go cluster.sendMsTeams(level, format, args) } case "TEST": - log.WithFields(log.Fields{"cluster": cluster.Name, "type": "test"}).Infof(cliformat, args...) + log.WithFields(log.Fields{"cluster": cluster.Name, "type": "test", "channel": "StdOut"}).Infof(cliformat, args...) case "BENCH": - log.WithFields(log.Fields{"cluster": cluster.Name, "type": "benchmark"}).Infof(cliformat, args...) + log.WithFields(log.Fields{"cluster": cluster.Name, "type": "benchmark", "channel": "StdOut"}).Infof(cliformat, args...) case "ALERT": - log.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert"}).Errorf(cliformat, args...) + log.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert", "channel": "StdOut"}).Errorf(cliformat, args...) if cluster.Conf.SlackURL != "" { - cluster.LogSlack.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert"}).Errorf(cliformat, args...) + cluster.LogSlack.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert", "channel": "Slack"}).Errorf(cliformat, args...) } if cluster.Conf.PushoverAppToken != "" && cluster.Conf.PushoverUserToken != "" { - cluster.LogPushover.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert"}).Errorf(cliformat, args...) + cluster.LogPushover.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert", "channel": "Pushover"}).Errorf(cliformat, args...) 
+ } + if cluster.Conf.TeamsUrl != "" { + go cluster.sendMsTeams(level, format, args) + } + case "START": + log.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert", "channel": "StdOut"}).Warnf(cliformat, args...) + if cluster.Conf.SlackURL != "" { + cluster.LogSlack.WithFields(log.Fields{"cluster": cluster.Name, "type": "start", "channel": "Slack"}).Warnf(cliformat, args...) + } + if cluster.Conf.PushoverAppToken != "" && cluster.Conf.PushoverUserToken != "" { + cluster.LogPushover.WithFields(log.Fields{"cluster": cluster.Name, "type": "start", "channel": "Pushover"}).Warnf(cliformat, args...) } if cluster.Conf.TeamsUrl != "" { go cluster.sendMsTeams(level, format, args) @@ -210,9 +222,9 @@ func (cluster *Cluster) LogPrintf(level string, format string, args ...interface code := cliformat[7:15] err := cliformat[18:] if status == "OPENED" { - log.WithFields(log.Fields{"cluster": cluster.Name, "type": "state", "status": status, "code": code}).Warnf(err, args...) + log.WithFields(log.Fields{"cluster": cluster.Name, "type": "state", "status": status, "code": code, "channel": "StdOut"}).Warnf(err, args...) } else { - log.WithFields(log.Fields{"cluster": cluster.Name, "type": "state", "status": status, "code": code}).Warnf(err, args...) + log.WithFields(log.Fields{"cluster": cluster.Name, "type": "state", "status": status, "code": code, "channel": "StdOut"}).Warnf(err, args...) } default: log.Printf(cliformat, args...) diff --git a/utils/logrus/hooks/pushover/pushover.go b/utils/logrus/hooks/pushover/pushover.go index d3d09db88..5630f0a8b 100644 --- a/utils/logrus/hooks/pushover/pushover.go +++ b/utils/logrus/hooks/pushover/pushover.go @@ -23,6 +23,7 @@ var defaultLevels []logrus.Level = []logrus.Level{ logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, + logrus.WarnLevel, } func (p *PushoverHook) Levels() []logrus.Level { @@ -48,8 +49,20 @@ func (p *PushoverHook) Fire(entry *logrus.Entry) error { if entry.Level == log.LevelError { pr = 0 } - if entry.Data["type"].(string) == "alert" { - pr = 1 + if entry.Level == log.LevelWarn { + pr = 0 + } + if entry.Data["type"] != nil { + if entry.Data["type"].(string) == "alert" { + pr = 1 + } + if entry.Data["type"].(string) == "start" { + pr = 1 + } + } + title := "No cluster!" + if entry.Data["cluster"] != nil { + title = "Cluster: " + entry.Data["cluster"].(string) } message := &client.Message{ Message: entry.Message, @@ -61,6 +74,6 @@ func (p *PushoverHook) Fire(entry *logrus.Entry) error { if err != nil { return fmt.Errorf("Could not send message to Pushover API: %s", err) } - + fmt.Printf("Message from PUSHOVER is send!!, %s, %d", entry.Data["type"].(string), pr) return nil } From 42ddd6b59098fa75dced3bcd0975bfdbff79cf40 Mon Sep 17 00:00:00 2001 From: emma Date: Fri, 14 Apr 2023 09:22:03 +0200 Subject: [PATCH 09/39] fix pushover alerting --- cluster/cluster_log.go | 2 +- utils/logrus/hooks/pushover/pushover.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/cluster/cluster_log.go b/cluster/cluster_log.go index ab124d91d..eebdb76fd 100644 --- a/cluster/cluster_log.go +++ b/cluster/cluster_log.go @@ -207,7 +207,7 @@ func (cluster *Cluster) LogPrintf(level string, format string, args ...interface go cluster.sendMsTeams(level, format, args) } case "START": - log.WithFields(log.Fields{"cluster": cluster.Name, "type": "alert", "channel": "StdOut"}).Warnf(cliformat, args...) + log.WithFields(log.Fields{"cluster": cluster.Name, "type": "start", "channel": "StdOut"}).Warnf(cliformat, args...) 
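// A condensed view of the alerting rule the two pushover fixes converge on:
// warnings and errors go out at normal priority, entries tagged type=alert or
// type=start are promoted to high priority, and the message title falls back
// to "No cluster!" when no cluster field is attached to the log entry.
// pushoverPriority is a hypothetical helper distilled from Fire().
func pushoverPriority(entryType string) int {
	pr := 0 // normal priority for plain Warn/Error entries
	if entryType == "alert" || entryType == "start" {
		pr = 1 // high priority for alert and start events
	}
	return pr
}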
if cluster.Conf.SlackURL != "" { cluster.LogSlack.WithFields(log.Fields{"cluster": cluster.Name, "type": "start", "channel": "Slack"}).Warnf(cliformat, args...) } diff --git a/utils/logrus/hooks/pushover/pushover.go b/utils/logrus/hooks/pushover/pushover.go index 5630f0a8b..b7e7d520e 100644 --- a/utils/logrus/hooks/pushover/pushover.go +++ b/utils/logrus/hooks/pushover/pushover.go @@ -67,13 +67,12 @@ func (p *PushoverHook) Fire(entry *logrus.Entry) error { message := &client.Message{ Message: entry.Message, Timestamp: entry.Time.Unix(), - Title: "Cluster: " + entry.Data["cluster"].(string), + Title: title, Priority: pr, } _, err := p.app.SendMessage(message, p.recipient) if err != nil { return fmt.Errorf("Could not send message to Pushover API: %s", err) } - fmt.Printf("Message from PUSHOVER is send!!, %s, %d", entry.Data["type"].(string), pr) return nil } From ee902ec8561b48ad8a2e1357da6953fe5568bb10 Mon Sep 17 00:00:00 2001 From: emma Date: Fri, 14 Apr 2023 10:27:46 +0200 Subject: [PATCH 10/39] add default flag map cluster --- cluster/cluster.go | 4 +++- cluster/cluster_add.go | 2 +- cluster/cluster_get.go | 8 ++++++++ config/config.go | 9 ++++++--- server/server.go | 6 ++++-- server/server_monitor.go | 11 +++++++++-- 6 files changed, 31 insertions(+), 9 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 5ad31c448..e51c50b67 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -92,6 +92,7 @@ type Cluster struct { Confs *config.ConfVersion `json:"-"` ImmuableFlagMap map[string]interface{} `json:"-"` DynamicFlagMap map[string]interface{} `json:"-"` + DefaultFlagMap map[string]interface{} `json:"-"` CleanAll bool `json:"cleanReplication"` //used in testing Topology string `json:"topology"` Uptime string `json:"uptime"` @@ -275,10 +276,11 @@ const ( ) // Init initial cluster definition -func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface{}, dyn map[string]interface{}, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error { +func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface{}, dyn map[string]interface{}, def map[string]interface{}, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error { cluster.Confs = confs cluster.ImmuableFlagMap = imm cluster.DynamicFlagMap = dyn + cluster.DefaultFlagMap = def conf := confs.ConfInit cluster.SqlErrorLog = logsql.New() cluster.SqlGeneralLog = logsql.New() diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index 3078a0073..26f602d30 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -99,7 +99,7 @@ func (cluster *Cluster) AddDynamicFlagMap(name string, val string) { cluster.DynamicFlagMap[name] = !b.(bool) } else { - cluster.DynamicFlagMap[name] = !cluster.Confs.ConfFlag.GetBool(name) + cluster.DynamicFlagMap[name] = !cluster.GetBoolDefaultFlagMap(name) } } else { diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index b0731c643..c5ea37156 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -882,3 +882,11 @@ func (cluster *Cluster) GetVaultConnection() (*vault.Client, error) { } return nil, errors.New("Not using Vault") } + +func (cluster *Cluster) GetBoolDefaultFlagMap(name string) bool { + v, ok := cluster.DefaultFlagMap[name] + if ok { + return v.(bool) + } + return false +} diff --git a/config/config.go 
b/config/config.go index 373364fa8..55b8401a1 100644 --- a/config/config.go +++ b/config/config.go @@ -1072,11 +1072,14 @@ func (conf *Config) GetStringValue(name string) string { func (conf *Config) GetBool(name string) bool { values := reflect.ValueOf(conf) types := values.Type() - for i := 0; i < values.NumField(); i++ { - if types.Field(i).Name == name { - return values.Field(i).Bool() + if values != reflect.Zero(types) { + for i := 0; i < values.NumField(); i++ { + if types.Field(i).Name == name { + return values.Field(i).Bool() + } } } + return false } diff --git a/server/server.go b/server/server.go index 6628057c0..e7fff3b22 100644 --- a/server/server.go +++ b/server/server.go @@ -67,6 +67,7 @@ type ReplicationManager struct { ConfFlag config.Config `json:"-"` ImmuableFlagMaps map[string]map[string]interface{} `json:"-"` DynamicFlagMaps map[string]map[string]interface{} `json:"-"` + DefaultFlagMap map[string]interface{} `json:"-"` CommandLineFlag []string `json:"-"` ConfigPathList []string `json:"-"` Logs s18log.HttpLog `json:"logs"` @@ -263,6 +264,7 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //fmt.Printf("REPMAN DEFAULT FLAG: ") //ConfFlag is already set in server_monitor to get all default value flag (without being overwrited by command line flag) //repman.ConfFlag.PrintConf() + fmt.Printf("DEFAULT FLAG MAP %s", repman.DefaultFlagMap) //if a config file is already define if conf.ConfigFile != "" { @@ -571,7 +573,7 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab } } - fmt.Printf("GET CLUST CONF Dynamic map : %s\n", clustDynamicMap) + //fmt.Printf("GET CLUST CONF Dynamic map : %s\n", clustDynamicMap) repman.DynamicFlagMaps[cluster] = clustDynamicMap confs.ConfInit = clusterconf @@ -841,7 +843,7 @@ func (repman *ReplicationManager) StartCluster(clusterName string) (*cluster.Clu myClusterConf.WorkingDir = myClusterConf.BaseDir + "/data" } repman.VersionConfs[clusterName].ConfInit = myClusterConf - repman.currentCluster.Init(repman.VersionConfs[clusterName], repman.ImmuableFlagMaps[clusterName], repman.DynamicFlagMaps[clusterName], clusterName, &repman.tlog, &repman.Logs, repman.termlength, repman.UUID, repman.Version, repman.Hostname, k) + repman.currentCluster.Init(repman.VersionConfs[clusterName], repman.ImmuableFlagMaps[clusterName], repman.DynamicFlagMaps[clusterName], repman.DefaultFlagMap, clusterName, &repman.tlog, &repman.Logs, repman.termlength, repman.UUID, repman.Version, repman.Hostname, k) repman.Clusters[clusterName] = repman.currentCluster repman.currentCluster.SetCertificate(repman.OpenSVC) go repman.currentCluster.Run() diff --git a/server/server_monitor.go b/server/server_monitor.go index 5336bbbcc..82381270b 100644 --- a/server/server_monitor.go +++ b/server/server_monitor.go @@ -31,6 +31,7 @@ import ( ) var repman_default config.Config +var defaultFlagMap map[string]interface{} func init() { @@ -558,10 +559,15 @@ func init() { initLogFlags(monitorCmd) //conf des defaults flag sans les paramètres en ligne de commande - //var repman_default config.Config - v := viper.GetViper() v.SetConfigType("toml") + + defaultFlagMap = make(map[string]interface{}) + + for _, f := range v.AllKeys() { + defaultFlagMap[f] = v.Get(f) + } + v.Unmarshal(&repman_default) //repman_default.PrintConf() @@ -684,6 +690,7 @@ For interacting with this daemon use, RepMan = new(ReplicationManager) RepMan.CommandLineFlag = GetCommandLineFlag(cmd) RepMan.ConfFlag = repman_default + RepMan.DefaultFlagMap = defaultFlagMap 
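// The defaults snapshot taken in init() is read back when a cluster config is
// persisted later in this series: values still equal to their recorded default
// are dropped so only genuine overrides reach the saved config.toml. A minimal
// sketch of that comparison, with isDefaultValue as a hypothetical helper and
// the same fmt.Sprintf-based equality the Save() code uses:
func isDefaultValue(defaults map[string]interface{}, key string, val interface{}) bool {
	d, ok := defaults[key]
	return ok && fmt.Sprintf("%v", d) == fmt.Sprintf("%v", val)
}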
RepMan.InitConfig(conf) RepMan.Run() }, From 7df2013141e05e1d62cd43655d7cf543b1a34194 Mon Sep 17 00:00:00 2001 From: apple Date: Fri, 7 Apr 2023 19:37:27 +0200 Subject: [PATCH 11/39] Monitoring slow queries via table log can be disable via monitor-queries = false Monitoring slow queries preserve log table via tracking last slow query timestamp --- cluster/srv.go | 22 ++++++++++++------- cluster/srv_get.go | 18 +++++++++++---- .../cluster-api/cluster-demo/stephane.toml | 8 +++++++ 3 files changed, 36 insertions(+), 12 deletions(-) diff --git a/cluster/srv.go b/cluster/srv.go index 00a465b34..77ebad7c0 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -172,6 +172,8 @@ type ServerMonitor struct { SSTPort string `json:"sstPort"` //used to send data to dbjobs Agent string `json:"agent"` //used to provision service in orchestrator BinaryLogFiles map[string]uint `json:"binaryLogFiles"` + MaxSlowQueryTimestamp int64 `json:"maxSlowQueryTimestamp"` + IsInSlowQueryCapture bool } type serverList []*ServerMonitor @@ -1267,16 +1269,18 @@ func (server *ServerMonitor) Capture() error { func (server *ServerMonitor) SaveInfos() error { type Save struct { - Variables map[string]string `json:"variables"` - ProcessList []dbhelper.Processlist `json:"processlist"` - Status map[string]string `json:"status"` - SlaveStatus []dbhelper.SlaveStatus `json:"slavestatus"` + Variables map[string]string `json:"variables"` + ProcessList []dbhelper.Processlist `json:"processlist"` + Status map[string]string `json:"status"` + SlaveStatus []dbhelper.SlaveStatus `json:"slavestatus"` + MaxSlowQueryTimestamp int64 `json:"maxSlowQueryTimestamp"` } var clsave Save clsave.Variables = server.Variables clsave.Status = server.Status clsave.ProcessList = server.FullProcessList clsave.SlaveStatus = server.LastSeenReplications + clsave.MaxSlowQueryTimestamp = server.MaxSlowQueryTimestamp saveJSON, _ := json.MarshalIndent(clsave, "", "\t") err := ioutil.WriteFile(server.Datadir+"/serverstate.json", saveJSON, 0644) if err != nil { @@ -1287,10 +1291,11 @@ func (server *ServerMonitor) SaveInfos() error { func (server *ServerMonitor) ReloadSaveInfosVariables() error { type Save struct { - Variables map[string]string `json:"variables"` - ProcessList []dbhelper.Processlist `json:"processlist"` - Status map[string]string `json:"status"` - SlaveStatus []dbhelper.SlaveStatus `json:"slavestatus"` + Variables map[string]string `json:"variables"` + ProcessList []dbhelper.Processlist `json:"processlist"` + Status map[string]string `json:"status"` + SlaveStatus []dbhelper.SlaveStatus `json:"slavestatus"` + MaxSlowQueryTimestamp int64 `json:"maxSlowQueryTimestamp"` } var clsave Save @@ -1305,6 +1310,7 @@ func (server *ServerMonitor) ReloadSaveInfosVariables() error { return err } server.Variables = clsave.Variables + server.MaxSlowQueryTimestamp = clsave.MaxSlowQueryTimestamp return nil } diff --git a/cluster/srv_get.go b/cluster/srv_get.go index 1bbaa98ba..797bffe8d 100644 --- a/cluster/srv_get.go +++ b/cluster/srv_get.go @@ -514,6 +514,7 @@ func (server *ServerMonitor) GetNewDBConn() (*sqlx.DB, error) { } func (server *ServerMonitor) GetSlowLogTable() { + if server.ClusterGroup.IsInFailover() { return } @@ -523,6 +524,14 @@ func (server *ServerMonitor) GetSlowLogTable() { if server.IsDown() { return } + if !server.GetCluster().GetConf().MonitorQueries { + return + } + if server.IsInSlowQueryCapture { + return + } + server.IsInSlowQueryCapture = true + defer func() { server.IsInSlowQueryCapture = false }() f, err := 
os.OpenFile(server.Datadir+"/log/log_slow_query.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) if err != nil { server.ClusterGroup.LogPrintf(LvlErr, "Error writing slow queries %s", err) @@ -538,11 +547,10 @@ func (server *ServerMonitor) GetSlowLogTable() { slowqueries := []dbhelper.LogSlow{} if server.DBVersion.IsMySQLOrPercona() { - err = server.Conn.Select(&slowqueries, "SELECT FLOOR(UNIX_TIMESTAMP(start_time)) as start_time, user_host,TIME_TO_SEC(query_time) AS query_time,TIME_TO_SEC(lock_time) AS lock_time,rows_sent,rows_examined,db,last_insert_id,insert_id,server_id,sql_text,thread_id, 0 as rows_affected FROM mysql.slow_log") + err = server.Conn.Select(&slowqueries, "SELECT FLOOR(UNIX_TIMESTAMP(start_time)) as start_time, user_host,TIME_TO_SEC(query_time) AS query_time,TIME_TO_SEC(lock_time) AS lock_time,rows_sent,rows_examined,db,last_insert_id,insert_id,server_id,sql_text,thread_id, 0 as rows_affected FROM mysql.slow_log WHERE start_time > FROM_UNIXTIME("+strconv.FormatInt(server.MaxSlowQueryTimestamp+1, 10)+")") } else { - err = server.Conn.Select(&slowqueries, "SELECT FLOOR(UNIX_TIMESTAMP(start_time)) as start_time, user_host,TIME_TO_SEC(query_time) AS query_time,TIME_TO_SEC(lock_time) AS lock_time,rows_sent,rows_examined,db,last_insert_id,insert_id,server_id,sql_text,thread_id,0 as rows_affected FROM mysql.slow_log") + err = server.Conn.Select(&slowqueries, "SELECT FLOOR(UNIX_TIMESTAMP(start_time)) as start_time, user_host,TIME_TO_SEC(query_time) AS query_time,TIME_TO_SEC(lock_time) AS lock_time,rows_sent,rows_examined,db,last_insert_id,insert_id,server_id,sql_text,thread_id,0 as rows_affected FROM mysql.slow_log WHERE start_time > FROM_UNIXTIME("+strconv.FormatInt(server.MaxSlowQueryTimestamp+1, 10)+")") } - if err != nil { server.ClusterGroup.LogPrintf(LvlErr, "Could not get slow queries from table %s", err) } @@ -560,8 +568,10 @@ func (server *ServerMonitor) GetSlowLogTable() { s.Start_time, strings.Replace(strings.Replace(s.Sql_text.String, "\r\n", " ", -1), "\n", " ", -1), ) + server.MaxSlowQueryTimestamp = s.Start_time } - server.ExecQueryNoBinLog("TRUNCATE mysql.slow_log") + + // server.ExecQueryNoBinLog("TRUNCATE mysql.slow_log") } func (server *ServerMonitor) GetTables() []v3.Table { diff --git a/etc/opensvc/cluster-api/cluster-demo/stephane.toml b/etc/opensvc/cluster-api/cluster-demo/stephane.toml index 03e08c775..4070219b2 100644 --- a/etc/opensvc/cluster-api/cluster-demo/stephane.toml +++ b/etc/opensvc/cluster-api/cluster-demo/stephane.toml @@ -34,6 +34,14 @@ prov-proxy-service-type = "docker" prov-proxy-disk-type = "volume" prov-proxy-volume-data = "tank" test=true + + +alert-pushover-app-token="ad5t1i6m4491ioauubaitqtg1kewtu" +alert-pushover-user-token="uixs8gufi8859u8k8zqeojx7i948fi" + +#monitoring-ssl-cert="/Users/apple/.ssh/id_rsa" +#monitoring-ssl-key="/Users/apple/.ssh/id_rsa.pub" + #arbitration-external= true #arbitration-external-hosts="10.8.0.50:8080" #arbitration-peer-hosts="10.8.0.72:10001" From 4f02c95cb56506f67a9405590a8b6bf7127a1add Mon Sep 17 00:00:00 2001 From: apple Date: Tue, 11 Apr 2023 17:10:46 +0200 Subject: [PATCH 12/39] Fetch all PFS queries in backgroud without order by --- cluster/srv.go | 11 +++-------- cluster/srv_get.go | 17 +++++++++++++++++ utils/dbhelper/dbhelper.go | 8 +++++--- 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/cluster/srv.go b/cluster/srv.go index 77ebad7c0..0522623ae 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -174,6 +174,7 @@ type ServerMonitor struct { BinaryLogFiles map[string]uint 
`json:"binaryLogFiles"` MaxSlowQueryTimestamp int64 `json:"maxSlowQueryTimestamp"` IsInSlowQueryCapture bool + IsInPFSQueryCapture bool } type serverList []*ServerMonitor @@ -762,20 +763,14 @@ func (server *ServerMonitor) Refresh() error { server.EngineInnoDB, logs, err = dbhelper.GetEngineInnoDBVariables(server.Conn) server.ClusterGroup.LogSQL(logs, err, server.URL, "Monitor", LvlDbg, "Could not get engine innodb status %s %s", server.URL, err) } - if server.ClusterGroup.Conf.MonitorPFS && server.HavePFSSlowQueryLog && server.HavePFS { - // GET PFS query digest - server.PFSQueries, logs, err = dbhelper.GetQueries(server.Conn) - server.ClusterGroup.LogSQL(logs, err, server.URL, "Monitor", LvlDbg, "Could not get queries %s %s", server.URL, err) - } + go server.GetPFSQueries() + go server.GetSlowLogTable() if server.HaveDiskMonitor { server.Disks, logs, err = dbhelper.GetDisks(server.Conn, server.DBVersion) } if server.ClusterGroup.Conf.MonitorScheduler { server.CheckDisks() } - if server.HasLogsInSystemTables() { - go server.GetSlowLogTable() - } } // End not PG diff --git a/cluster/srv_get.go b/cluster/srv_get.go index 797bffe8d..a5dfe3111 100644 --- a/cluster/srv_get.go +++ b/cluster/srv_get.go @@ -384,6 +384,23 @@ func (server *ServerMonitor) GetErrorLog() s18log.HttpLog { return server.ErrorLog } +func (server *ServerMonitor) GetPFSQueries() { + if !(server.ClusterGroup.Conf.MonitorPFS && server.HavePFSSlowQueryLog && server.HavePFS) { + return + } + if server.IsInPFSQueryCapture { + return + } + server.IsInPFSQueryCapture = true + defer func() { server.IsInPFSQueryCapture = false }() + + var err error + logs := "" + // GET PFS query digest + server.PFSQueries, logs, err = dbhelper.GetQueries(server.Conn) + server.ClusterGroup.LogSQL(logs, err, server.URL, "Monitor", LvlDbg, "Could not get queries %s %s", server.URL, err) +} + func (server *ServerMonitor) GetPFSStatements() []dbhelper.PFSQuery { var rows []dbhelper.PFSQuery for _, v := range server.PFSQueries { diff --git a/utils/dbhelper/dbhelper.go b/utils/dbhelper/dbhelper.go index 6eb7f6a4d..d6016e42a 100644 --- a/utils/dbhelper/dbhelper.go +++ b/utils/dbhelper/dbhelper.go @@ -1592,9 +1592,11 @@ func GetQueries(db *sqlx.DB) (map[string]PFSQuery, string, error) { A.SUM_ROWS_EXAMINED AS rows_scanned, round(A.sum_timer_wait/1000000000000, 6) as value FROM performance_schema.events_statements_summary_by_digest A - WHERE A.digest_text is not null - ORDER BY A.sum_timer_wait desc - LIMIT 50` + WHERE A.digest_text is not null` + + // Do not order as it's eavy fot temporary directory + //ORDER BY A.sum_timer_wait desc + //LIMIT 50` rows, err := db.Queryx(query) if err != nil { From ff877f441d7d1acf8ce6efdc1bc948153cc0c6b5 Mon Sep 17 00:00:00 2001 From: emma Date: Fri, 14 Apr 2023 17:40:21 +0200 Subject: [PATCH 13/39] in monitoring save config mode, create a config.toml file that saved the dynamic changes of not immuable flags and a overwrite.toml config file that saved the dynamic changes on immuable flags --- cluster/cluster.go | 91 ++++++++++++++++------------------------ cluster/cluster_add.go | 44 ------------------- cluster/cluster_get.go | 8 ---- config/config.go | 36 +++------------- go.mod | 1 + go.sum | 2 + server/api_cluster.go | 7 ---- server/server.go | 50 ++++++++++++---------- server/server_monitor.go | 6 --- 9 files changed, 70 insertions(+), 175 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index e0b907d02..77c1cc627 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -15,13 +15,12 @@ import ( 
"io/ioutil" "os" "os/exec" - "reflect" "strings" "sync" - t "text/template" "time" "github.com/bluele/logrus_slack" + "github.com/pelletier/go-toml" "github.com/signal18/replication-manager/cluster/configurator" "github.com/signal18/replication-manager/cluster/nbc" "github.com/signal18/replication-manager/config" @@ -784,42 +783,27 @@ func (cluster *Cluster) Save() error { } defer file.Close() - /* - values := reflect.ValueOf(myconf["saved-"+cluster.Name]) - types := values.Type() - s := "" - ss := "" - file.WriteString("[saved-" + cluster.Name + "]\n") - for i := 0; i < values.NumField(); i++ { - _, ok := cluster.ImmuableFlagMap[types.Name()] - if values.Field(i).String() != "" || !ok { - if types.Field(i).Type.String() == "string" { - s = " " + types.Field(i).Name + " = \"" + values.Field(i).String() + "\"\n" - } - if types.Field(i).Type.String() == "bool" || types.Field(i).Type.String() == "int" || types.Field(i).Type.String() == "uint64" || types.Field(i).Type.String() == "int64" { - s = " " + types.Field(i).Name + " = " - ss = format(" {{.}} \n", values.Field(i)) - } - file.WriteString(s) - file.WriteString(ss) - ss = "" - } - }*/ - s := "" - ss := "" - file.WriteString("[saved-" + cluster.Name + "]\n") - for tag := range cluster.DynamicFlagMap { - s = " " + tag + " = " - fmt.Printf("SAVE : %s", tag) - if reflect.TypeOf(cluster.DynamicFlagMap[tag]).String() == "string" { - s += "'" - ss = format("{{.}}", cluster.DynamicFlagMap[tag]) + "'\n" + readconf, _ := toml.Marshal(cluster.Conf) + t, _ := toml.LoadBytes(readconf) + s := t + keys := t.Keys() + for _, key := range keys { + _, ok := cluster.ImmuableFlagMap[key] + if ok { + s.Delete(key) } else { - ss = format(" {{.}} \n", cluster.DynamicFlagMap[tag]) + v, ok := cluster.DefaultFlagMap[key] + if ok && fmt.Sprintf("%v", s.Get(key)) == fmt.Sprintf("%v", v) { + s.Delete(key) + } + if !ok { + s.Delete(key) + } } - file.WriteString(s) - file.WriteString(ss) } + file.WriteString("[saved-" + cluster.Name + "]\n") + s.WriteTo(file) + err = cluster.Overwrite() if err != nil { cluster.LogPrintf(LvlInfo, "Error during Overwriting: %s", err) @@ -829,12 +813,6 @@ func (cluster *Cluster) Save() error { return nil } -func format(s string, v interface{}) string { - c, b := new(t.Template), new(strings.Builder) - t.Must(c.Parse(s)).Execute(b, v) - return b.String() -} - func (cluster *Cluster) Overwrite() error { if cluster.Conf.ConfRewrite { @@ -850,25 +828,26 @@ func (cluster *Cluster) Overwrite() error { return err } defer file.Close() - s := "" - ss := "" - file.WriteString("[overwrite-" + cluster.Name + "]\n") - for tag := range cluster.ImmuableFlagMap { - _, ok := cluster.DynamicFlagMap[tag] - if ok { - s = " " + tag + " = " - if reflect.TypeOf(cluster.DynamicFlagMap[tag]).String() == "string" { - s += "'" - ss = format("{{.}}", cluster.DynamicFlagMap[tag]) + "'\n" - } else { - ss = format(" {{.}} \n", cluster.DynamicFlagMap[tag]) - } - file.WriteString(s) - file.WriteString(ss) + readconf, _ := toml.Marshal(cluster.Conf) + t, _ := toml.LoadBytes(readconf) + s := t + keys := t.Keys() + for _, key := range keys { + + v, ok := cluster.ImmuableFlagMap[key] + if !ok { + s.Delete(key) + } else { + + if ok && fmt.Sprintf("%v", s.Get(key)) == fmt.Sprintf("%v", v) { + s.Delete(key) + } } } + file.WriteString("[overwrite-" + cluster.Name + "]\n") + s.WriteTo(file) } return nil diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index 26f602d30..1c0f3dd74 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -7,8 +7,6 @@ package 
cluster import ( - "fmt" - "strconv" "strings" "sync" @@ -38,7 +36,6 @@ func (cluster *Cluster) AddDBTagConfig(tag string) { cluster.Configurator.AddDBTag(tag) cluster.Conf.ProvTags = cluster.Configurator.GetConfigDBTags() cluster.SetClusterCredentialsFromConfig() - cluster.AddDBTagDynamicFlagMap(tag) } } @@ -66,47 +63,6 @@ func (cluster *Cluster) AddDBTag(tag string) { } -func (cluster *Cluster) AddDBTagDynamicFlagMap(tag string) { - //to add dynamic tag in the cluster dynamic flag map - v, ok := cluster.DynamicFlagMap["prov-db-tags"] - if ok { - str := fmt.Sprintf("%v", v) - if !strings.Contains(str, tag) { - str += "," + tag - cluster.DynamicFlagMap["prov-db-tags"] = str - } - } else { - v, ok := cluster.ImmuableFlagMap["prov-db-tags"] - if ok { - imm_tag := fmt.Sprintf("%v", v) - cluster.DynamicFlagMap["prov-db-tags"] = imm_tag + "," + tag - } else { - cluster.DynamicFlagMap["prov-db-tags"] = tag - } - - } -} - -func (cluster *Cluster) AddDynamicFlagMap(name string, val string) { - //to add dynamic setting in the cluster dynamic flag map - if name == "failover-max-slave-delay" { - cluster.DynamicFlagMap[name], _ = strconv.ParseInt(val, 10, 64) - } else if name == "failover-limit" { - cluster.DynamicFlagMap[name], _ = strconv.Atoi(val) - } else if val == "bool" { - b, ok := cluster.ImmuableFlagMap[name] - if ok { - cluster.DynamicFlagMap[name] = !b.(bool) - } else { - - cluster.DynamicFlagMap[name] = !cluster.GetBoolDefaultFlagMap(name) - } - - } else { - cluster.DynamicFlagMap[name] = val - } -} - func (cluster *Cluster) AddProxyTag(tag string) { cluster.Configurator.AddProxyTag(tag) cluster.Conf.ProvProxTags = cluster.Configurator.GetConfigProxyTags() diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index c5ea37156..b0731c643 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -882,11 +882,3 @@ func (cluster *Cluster) GetVaultConnection() (*vault.Client, error) { } return nil, errors.New("Not using Vault") } - -func (cluster *Cluster) GetBoolDefaultFlagMap(name string) bool { - v, ok := cluster.DefaultFlagMap[name] - if ok { - return v.(bool) - } - return false -} diff --git a/config/config.go b/config/config.go index 55b8401a1..c29f8cea1 100644 --- a/config/config.go +++ b/config/config.go @@ -349,7 +349,7 @@ type Config struct { OnPremiseSSHStartDbScript string `mapstructure:"onpremise-ssh-start-db-script" toml:"onpremise-ssh-start-db-script" json:"onpremiseSshStartDbScript"` OnPremiseSSHStartProxyScript string `mapstructure:"onpremise-ssh-start-proxy-script" toml:"onpremise-ssh-start-proxy-script" json:"onpremiseSshStartProxyScript"` OnPremiseSSHDbJobScript string `mapstructure:"onpremise-ssh-db-job-script" toml:"onpremise-ssh-db-job-script" json:"onpremiseSshDbJobScript"` - ProvOpensvcP12Certificate string `mapstructure:"opensvc-p12-certificate" toml:"opensvc-p12-certificat" json:"opensvcP12Certificate"` + ProvOpensvcP12Certificate string `mapstructure:"opensvc-p12-certificate" toml:"opensvc-p12-certificate" json:"opensvcP12Certificate"` ProvOpensvcP12Secret string `mapstructure:"opensvc-p12-secret" toml:"opensvc-p12-secret" json:"opensvcP12Secret"` ProvOpensvcUseCollectorAPI bool `mapstructure:"opensvc-use-collector-api" toml:"opensvc-use-collector-api" json:"opensvcUseCollectorApi"` ProvOpensvcCollectorAccount string `mapstructure:"opensvc-collector-account" toml:"opensvc-collector-account" json:"opensvcCollectorAccount"` @@ -588,11 +588,10 @@ type MyDumperMetaData struct { } type ConfVersion struct { - ConfDynamic Config `json:"-"` - ConfFlag Config 
`json:"-"` - ConfImmutable Config `json:"-"` - ConfInit Config `json:"-"` - ConfDecode Config `json:"-"` + ConfInit Config `json:"-"` + ConfDecode Config `json:"-"` + ConfDynamic Config `json:"-"` + ConfImmuable Config `json:"-"` } const ( @@ -1058,31 +1057,6 @@ func (conf *Config) GetTarballUrl(name string) (string, error) { return "", errors.New("tarball not found in collection") } -func (conf *Config) GetStringValue(name string) string { - values := reflect.ValueOf(conf) - types := values.Type() - for i := 0; i < values.NumField(); i++ { - if types.Field(i).Name == name { - return values.Field(i).String() - } - } - return "" -} - -func (conf *Config) GetBool(name string) bool { - values := reflect.ValueOf(conf) - types := values.Type() - if values != reflect.Zero(types) { - for i := 0; i < values.NumField(); i++ { - if types.Field(i).Name == name { - return values.Field(i).Bool() - } - } - } - - return false -} - func (conf Config) PrintConf() { values := reflect.ValueOf(conf) types := values.Type() diff --git a/go.mod b/go.mod index fb3605a27..417b22a1f 100644 --- a/go.mod +++ b/go.mod @@ -130,6 +130,7 @@ require ( github.com/lestrrat/go-envload v0.0.0-20180220120943-6ed08b54a570 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect github.com/miekg/dns v1.1.43 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/xattr v0.4.6 github.com/rs/cors v1.7.0 // indirect github.com/siddontang/go-log v0.0.0-20190221022429-1e957dd83bed // indirect diff --git a/go.sum b/go.sum index d6199fed4..b0e2dd787 100644 --- a/go.sum +++ b/go.sum @@ -616,6 +616,8 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/percona/go-mysql v0.0.0-20190307200310-f5cfaf6a5e55 h1:kx48fD4K+GXb7YqwykiWirM8GRtoMcpWbGlaAb3IsqE= github.com/percona/go-mysql v0.0.0-20190307200310-f5cfaf6a5e55/go.mod h1:/SGLf9OMxlnK6jq4mkFiImBcJXXk5jwD+lDrwDaGXcw= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= diff --git a/server/api_cluster.go b/server/api_cluster.go index b7d29401d..3f13f68f3 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -1088,9 +1088,6 @@ func (repman *ReplicationManager) switchSettings(mycluster *cluster.Cluster, set case "monitoring-processlist": mycluster.SwitchMonitoringProcesslist() } - if mycluster.IsVariableDiffFromRepmanDefault(setting) { - mycluster.AddDynamicFlagMap(setting, "bool") - } } func (repman *ReplicationManager) handlerMuxSetSettings(w http.ResponseWriter, r *http.Request) { @@ -1244,9 +1241,6 @@ func (repman *ReplicationManager) setSetting(mycluster *cluster.Cluster, name st case "backup-binlogs-keep": mycluster.SetBackupBinlogsKeep(value) } - if mycluster.IsVariableDiffFromRepmanDefault(name) { - mycluster.AddDynamicFlagMap(name, value) - } } func (repman *ReplicationManager) handlerMuxAddTag(w http.ResponseWriter, r *http.Request) { @@ -1275,7 +1269,6 @@ func (repman *ReplicationManager) handlerMuxAddProxyTag(w http.ResponseWriter, r http.Error(w, "No valid ACL", 403) return } - 
mycluster.AddProxyTag(vars["tagValue"]) } else { http.Error(w, "Cluster Not Found", 500) return diff --git a/server/server.go b/server/server.go index e7fff3b22..733281836 100644 --- a/server/server.go +++ b/server/server.go @@ -64,7 +64,6 @@ type ReplicationManager struct { ClusterList []string `json:"clusters"` Tests []string `json:"tests"` Conf config.Config `json:"config"` - ConfFlag config.Config `json:"-"` ImmuableFlagMaps map[string]map[string]interface{} `json:"-"` DynamicFlagMaps map[string]map[string]interface{} `json:"-"` DefaultFlagMap map[string]interface{} `json:"-"` @@ -261,10 +260,8 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { fistRead := viper.GetViper() fistRead.SetConfigType("toml") - //fmt.Printf("REPMAN DEFAULT FLAG: ") - //ConfFlag is already set in server_monitor to get all default value flag (without being overwrited by command line flag) - //repman.ConfFlag.PrintConf() - fmt.Printf("DEFAULT FLAG MAP %s", repman.DefaultFlagMap) + //DefaultFlagMap is a map that contain all default flag value, set in the server_monitor.go file + //fmt.Printf("%s", repman.DefaultFlagMap) //if a config file is already define if conf.ConfigFile != "" { @@ -327,7 +324,11 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //Add immuatable flag from default section for _, f := range secRead.AllKeys() { - ImmuableMap[f] = secRead.Get(f) + v := secRead.Get(f) + if v != nil { + ImmuableMap[f] = secRead.Get(f) + } + } //test.PrintConf() @@ -532,7 +533,11 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab //Add immuatable flag from cluster section for _, f := range cf2.AllKeys() { - clustImmuableMap[f] = cf2.Get(f) + v := cf2.Get(f) + if v != nil { + clustImmuableMap[f] = v + } + } //clusterconf.PrintConf() @@ -542,8 +547,7 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab repman.ImmuableFlagMaps[cluster] = clustImmuableMap //store default cluster config in immutable config (all parameter set in default and cluster section, default value and command line) - confs.ConfImmutable = clusterconf - confs.ConfFlag = repman.ConfFlag + confs.ConfImmuable = clusterconf //fmt.Printf("%+v\n", cf2.AllSettings()) @@ -555,25 +559,25 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab } else { repman.initAlias(cf3) cf3.Unmarshal(&clusterconf) - } - confs.ConfDynamic = clusterconf - //to add flag in cluster dynamic map only if not defined yet or if the flag value read is diff from immuable flag value - for _, f := range cf2.AllKeys() { - v := cf2.Get(f) - if v != nil { - imm_v, ok := clustImmuableMap[f] - if ok && imm_v != v { - clustDynamicMap[f] = v - } - if !ok { - clustDynamicMap[f] = v + //to add flag in cluster dynamic map only if not defined yet or if the flag value read is diff from immuable flag value + for _, f := range cf3.AllKeys() { + v := cf3.Get(f) + if v != nil { + imm_v, ok := clustImmuableMap[f] + if ok && imm_v != v { + clustDynamicMap[f] = v + } + if !ok { + clustDynamicMap[f] = v + } + } } - } + confs.ConfDynamic = clusterconf + } - //fmt.Printf("GET CLUST CONF Dynamic map : %s\n", clustDynamicMap) repman.DynamicFlagMaps[cluster] = clustDynamicMap confs.ConfInit = clusterconf diff --git a/server/server_monitor.go b/server/server_monitor.go index 82381270b..cab8055f0 100644 --- a/server/server_monitor.go +++ b/server/server_monitor.go @@ -23,14 +23,12 @@ import ( "github.com/go-sql-driver/mysql" "github.com/signal18/replication-manager/cluster" - 
"github.com/signal18/replication-manager/config" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" ) -var repman_default config.Config var defaultFlagMap map[string]interface{} func init() { @@ -568,9 +566,6 @@ func init() { defaultFlagMap[f] = v.Get(f) } - v.Unmarshal(&repman_default) - //repman_default.PrintConf() - viper.BindPFlags(monitorCmd.Flags()) /* @@ -689,7 +684,6 @@ For interacting with this daemon use, RepMan = new(ReplicationManager) RepMan.CommandLineFlag = GetCommandLineFlag(cmd) - RepMan.ConfFlag = repman_default RepMan.DefaultFlagMap = defaultFlagMap RepMan.InitConfig(conf) RepMan.Run() From d0ebb691ddf887773e6d5df8edf9bec8839bdf9b Mon Sep 17 00:00:00 2001 From: emma Date: Mon, 17 Apr 2023 09:29:53 +0200 Subject: [PATCH 14/39] add keypath flag to passwordCmd --- server/password.go | 1 + 1 file changed, 1 insertion(+) diff --git a/server/password.go b/server/password.go index 0decc4846..7006f9be3 100644 --- a/server/password.go +++ b/server/password.go @@ -25,6 +25,7 @@ func init() { rootCmd.AddCommand(passwordCmd) keygenCmd.Flags().StringVar(&keyPath, "keypath", "/etc/replication-manager/.replication-manager.key", "Encryption key file path") keygenCmd.Flags().BoolVar(&overwrite, "overwrite", false, "Overwrite the previous key") + passwordCmd.Flags().StringVar(&keyPath, "keypath", "/etc/replication-manager/.replication-manager.key", "Encryption key file path") } var keygenCmd = &cobra.Command{ From f63d89f5faf522396638a6a3fc7ce712c1029013 Mon Sep 17 00:00:00 2001 From: emma Date: Wed, 19 Apr 2023 16:03:08 +0200 Subject: [PATCH 15/39] add github storage for the dynamic config file --- certgen.go | 2 - cluster/cluster.go | 154 ++++++++++++++++++++++++++++++++++++++- cluster/cluster_add.go | 2 + cluster/cluster_sec.go | 7 -- cluster/cluster_set.go | 32 ++++++-- cluster/cluster_topo.go | 5 +- config/config.go | 2 + go.mod | 6 +- go.sum | 96 ++++++++++++++++++++++++ server/api_cluster.go | 5 +- server/server.go | 44 ++++++++--- server/server_add.go | 19 ++++- server/server_monitor.go | 12 ++- utils/crypto/crypto.go | 1 - 14 files changed, 349 insertions(+), 38 deletions(-) diff --git a/certgen.go b/certgen.go index 408eda07d..b0d7b3263 100644 --- a/certgen.go +++ b/certgen.go @@ -7,7 +7,6 @@ package main import ( "flag" - "fmt" "log" "os" "time" @@ -26,7 +25,6 @@ var ( ) func main() { - fmt.Printf("COUCOU") flag.Parse() if len(*host) == 0 { diff --git a/cluster/cluster.go b/cluster/cluster.go index bc6ebe654..0ccec7976 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -20,6 +20,10 @@ import ( "time" "github.com/bluele/logrus_slack" + "github.com/go-git/go-git/v5" + git_ex "github.com/go-git/go-git/v5/_examples" + git_obj "github.com/go-git/go-git/v5/plumbing/object" + git_http "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/pelletier/go-toml" "github.com/signal18/replication-manager/cluster/configurator" "github.com/signal18/replication-manager/cluster/nbc" @@ -28,6 +32,7 @@ import ( "github.com/signal18/replication-manager/router/maxscale" "github.com/signal18/replication-manager/utils/alert" "github.com/signal18/replication-manager/utils/cron" + crypto "github.com/signal18/replication-manager/utils/crypto" "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/logrus/hooks/pushover" "github.com/signal18/replication-manager/utils/s18log" @@ -274,6 +279,28 @@ const ( VaultDbEngine string = "database_engine" ) +// to store the flags to encrypt in the git (in Save() 
function) +var encryptFlag = map[string]int{ + "api-credentials": 0, + "api-credentials-external": 0, + "db-servers-credential": 0, + "monitoring-write-heartbeat-credential": 0, + "onpremise-ssh-credential": 0, + "replication-credential": 0, + "shardproxy-credential": 0, + "backup-restic-password": 1, + "haproxy-password": 1, + "maxscale-pass": 1, + "myproxy-password": 1, + "proxysql-password": 1, + "vault-secret-id": 1, + "opensvc-p12-secret": 1, + "backup-restic-aws-access-secret": 1, + "backup-streaming-aws-access-secret": 1, + "arbitration-external-secret": 1, + "alert-pushover-user-token": 1, + "mail-smtp-password": 1} + // Init initial cluster definition func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface{}, dyn map[string]interface{}, def map[string]interface{}, cfgGroup string, tlog *s18log.TermLog, loghttp *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error { cluster.Confs = confs @@ -417,7 +444,10 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface cluster.createKeys() cluster.GetPersitentState() - cluster.newServerList() + err = cluster.newServerList() + if err != nil { + cluster.LogPrintf(LvlErr, "Could not set server list %s", err) + } err = cluster.newProxyList() if err != nil { cluster.LogPrintf(LvlErr, "Could not set proxy list %s", err) @@ -435,7 +465,8 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface cluster.DropDBTagConfig("threadpool") cluster.AddDBTagConfig("pkg") } - + //fmt.Printf("INIT CLUSTER CONF :\n") + cluster.Conf.PrintConf() return nil } @@ -577,7 +608,11 @@ func (cluster *Cluster) Run() { } wg.Wait() // AddChildServers can't be done before TopologyDiscover but need a refresh aquiring more fresh gtid vs current cluster so elelection win but server is ignored see electFailoverCandidate - cluster.AddChildServers() + err := cluster.AddChildServers() + + if err != nil { + cluster.LogPrintf(LvlInfo, "Fail of AddChildServers %s", err) + } cluster.IsFailable = cluster.GetStatus() // CheckFailed trigger failover code if passing all false positiv and constraints @@ -771,7 +806,47 @@ func (cluster *Cluster) Save() error { if err != nil { return err } + if cluster.Conf.ConfRewrite { + //clone git repository in case its the first time + if cluster.Conf.GitUrl != "" { + if _, err := os.Stat(cluster.Conf.WorkingDir + "/" + cluster.Name + "/.git"); err == nil { + path := cluster.Conf.WorkingDir + "/" + cluster.Name + + // We instantiate a new repository targeting the given path (the .git folder) + r, err := git.PlainOpen(path) + git_ex.CheckIfError(err) + + // Get the working directory for the repository + w, err := r.Worktree() + git_ex.CheckIfError(err) + + // Pull the latest changes from the origin remote and merge into the current branch + git_ex.Info("git pull origin") + err = w.Pull(&git.PullOptions{RemoteName: "origin"}) + //git_ex.CheckIfError(err) + } else { + url := cluster.Conf.GitUrl + directory := cluster.Conf.WorkingDir + "/" + cluster.Name + + // Clone the given repository to the given directory + git_ex.Info("git clone %s %s --recursive", url, directory) + + _, err := git.PlainClone(directory, false, &git.CloneOptions{ + URL: url, + RecurseSubmodules: git.DefaultSubmoduleRecursionDepth, + Auth: &git_http.BasicAuth{ + Username: "replication-manager", // yes, this can be anything except an empty string + Password: cluster.Conf.GitAccesToken, + }, + }) + + git_ex.CheckIfError(err) + } + + } + 
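// A condensed sketch of the clone-or-pull flow above, using only the go-git
// calls the patch already relies on; cloneOrPull is a hypothetical helper and
// error handling is reduced to a single return value.
func cloneOrPull(dir, url, token string) error {
	if _, err := os.Stat(dir + "/.git"); err == nil {
		r, err := git.PlainOpen(dir) // reuse the existing working copy
		if err != nil {
			return err
		}
		w, err := r.Worktree()
		if err != nil {
			return err
		}
		// Pull may report an "already up-to-date" condition as an error,
		// which the code above deliberately tolerates.
		return w.Pull(&git.PullOptions{RemoteName: "origin"})
	}
	_, err := git.PlainClone(dir, false, &git.CloneOptions{
		URL:  url,
		Auth: &git_http.BasicAuth{Username: "replication-manager", Password: token},
	})
	return err
}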
//fmt.Printf("SAVE CLUSTER \n") + //cluster.Conf.PrintConf() var myconf = make(map[string]config.Config) myconf["saved-"+cluster.Name] = cluster.Conf @@ -803,8 +878,81 @@ func (cluster *Cluster) Save() error { } } } + for _, key := range keys { + + _, ok := encryptFlag[key] + if ok { + v := s.Get(key) + str := fmt.Sprintf("%v", v) + tmp := strings.Split(str, ":") + if len(tmp) == 2 { + str = tmp[1] + v = tmp[0] + } + + p := crypto.Password{PlainText: str} + var err error + key_path, ok := cluster.ImmuableFlagMap["monitoring-key-path"] + if ok { + p.Key, err = crypto.ReadKey(fmt.Sprintf("%v", key_path)) + if err != nil { + log.Fatalln(err) + } + p.Encrypt() + + if len(tmp) == 2 { + str = fmt.Sprintf("%v", v) + str = str + p.CipherText + v = str + } else { + v = p.CipherText + } + } else { + cluster.LogPrintf(LvlWarn, "Missing key file or wrong key path") + } + + } + } file.WriteString("[saved-" + cluster.Name + "]\n") s.WriteTo(file) + fmt.Printf("SAVE CLUSTER IMMUABLE MAP : %s", cluster.ImmuableFlagMap) + + //to load the new generated config file in github + if cluster.Conf.GitUrl != "" { + directory := cluster.Conf.WorkingDir + "/" + cluster.Name + r, err := git.PlainOpen(directory) + git_ex.CheckIfError(err) + + w, err := r.Worktree() + git_ex.CheckIfError(err) + + msg := "Update config.toml file" + + // Adds the new file to the staging area. + git_ex.Info("git add" + directory + "/config.toml") + _, err = w.Add("config.toml") + git_ex.CheckIfError(err) + + git_ex.Info("git commit -m \"New config file\"") + _, err = w.Commit(msg, &git.CommitOptions{ + Author: &git_obj.Signature{ + Name: "Replication-manager", + Email: cluster.Conf.MailFrom, + When: time.Now(), + }, + }) + + git_ex.CheckIfError(err) + + git_ex.Info("git push") + // push using default options + err = r.Push(&git.PushOptions{Auth: &git_http.BasicAuth{ + Username: "toto", // yes, this can be anything except an empty string + Password: cluster.Conf.GitAccesToken, + }}) + git_ex.CheckIfError(err) + + } err = cluster.Overwrite() if err != nil { diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index 1c0f3dd74..bab97852a 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -7,6 +7,7 @@ package cluster import ( + "fmt" "strings" "sync" @@ -16,6 +17,7 @@ import ( ) func (cluster *Cluster) AddSeededServer(srv string) error { + fmt.Printf("ADD SEEDED SERVER\n") if cluster.Conf.Hosts != "" { cluster.Conf.Hosts = cluster.Conf.Hosts + "," + srv } else { diff --git a/cluster/cluster_sec.go b/cluster/cluster_sec.go index 8fca54066..b4d4efbc0 100644 --- a/cluster/cluster_sec.go +++ b/cluster/cluster_sec.go @@ -138,16 +138,9 @@ func (cluster *Cluster) RotatePasswords() error { cluster.LogPrintf(LvlErr, "Fail of ProvisionRotatePasswords during rotation password ", err) } if cluster.GetConf().PushoverAppToken != "" && cluster.GetConf().PushoverUserToken != "" { - //logger := logrus.New() msg := "A password rotation has been made on Replication-Manager " + cluster.Name + " cluster. Check the new password on " + cluster.Conf.VaultServerAddr + " website on path " + cluster.Conf.VaultMount + cluster.Conf.User + " and " + cluster.Conf.VaultMount + cluster.Conf.RplUser + "." 
cluster.LogPrintf("ALERT", msg) - //entry := logrus.NewEntry(logger) - //msg := "COUCOU test" - //entry.Log(logrus.ErrorLevel, msg) - //p := pushover.NewHook(cluster.GetConf().PushoverAppToken, cluster.GetConf().PushoverUserToken) - //p.Fire(entry) - } if cluster.Conf.MailTo != "" { msg := "A password rotation has been made on Replication-Manager " + cluster.Name + " cluster. Check the new password on " + cluster.Conf.VaultServerAddr + " website on path " + cluster.Conf.VaultMount + "/" + cluster.Conf.User + " and " + cluster.Conf.VaultMount + "/" + cluster.Conf.RplUser + "." diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go index 639c90333..2f6f2f416 100644 --- a/cluster/cluster_set.go +++ b/cluster/cluster_set.go @@ -969,10 +969,15 @@ func (cluster *Cluster) SetServicePlan(theplan string) error { } } // cluster.LogPrintf(LvlErr, strings.Join(hosts, ",")) - cluster.SetDbServerHosts(strings.Join(hosts, ",")) - + err = cluster.SetDbServerHosts(strings.Join(hosts, ",")) + if err != nil { + cluster.LogPrintf(LvlErr, "SetServicePlan : Fail SetDbServerHosts : %s, for hosts : %s", err, strings.Join(hosts, ",")) + } cluster.sme.SetFailoverState() - cluster.newServerList() + err = cluster.newServerList() + if err != nil { + cluster.LogPrintf(LvlErr, "SetServicePlan : Fail newServerList : %s", err) + } wg := new(sync.WaitGroup) wg.Add(1) go cluster.TopologyDiscover(wg) @@ -997,11 +1002,24 @@ func (cluster *Cluster) SetServicePlan(theplan string) error { } else { cluster.LogPrintf(LvlInfo, "Adding shard proxy monitor 127.0.0.1:%s", portshardproxy) } - cluster.AddSeededProxy(config.ConstProxySqlproxy, "127.0.0.1", portproxysql, "", "") - cluster.AddSeededProxy(config.ConstProxySpider, "127.0.0.1", portshardproxy, "", "") + err = cluster.AddSeededProxy(config.ConstProxySqlproxy, "127.0.0.1", portproxysql, "", "") + if err != nil { + cluster.LogPrintf(LvlErr, "Fail adding proxysql monitor on 127.0.0.1 %s", err) + } + err = cluster.AddSeededProxy(config.ConstProxySpider, "127.0.0.1", portshardproxy, "", "") + if err != nil { + cluster.LogPrintf(LvlErr, "Fail adding shard proxy monitor on 127.0.0.1 %s", err) + } } else { - cluster.AddSeededProxy(config.ConstProxySpider, "shardproxy1", "3306", "", "") - cluster.AddSeededProxy(config.ConstProxySqlproxy, "proxysql1", cluster.Conf.ProxysqlPort, "", "") + err = cluster.AddSeededProxy(config.ConstProxySpider, "shardproxy1", "3306", "", "") + if err != nil { + cluster.LogPrintf(LvlErr, "Fail adding shard proxy monitor on 3306 %s", err) + } + + err = cluster.AddSeededProxy(config.ConstProxySqlproxy, "proxysql1", cluster.Conf.ProxysqlPort, "", "") + if err != nil { + cluster.LogPrintf(LvlErr, "Fail adding proxysql monitor on %s %s", cluster.Conf.ProxysqlPort, err) + } } } else { cluster.LogPrintf(LvlInfo, "Copy proxy list from cluster head %s", cluster.Conf.ClusterHead) diff --git a/cluster/cluster_topo.go b/cluster/cluster_topo.go index f79934002..103ccb465 100644 --- a/cluster/cluster_topo.go +++ b/cluster/cluster_topo.go @@ -95,7 +95,10 @@ func (cluster *Cluster) AddChildServers() error { cluster.Servers = append(cluster.Servers, srv) wg := new(sync.WaitGroup) wg.Add(1) - cluster.TopologyDiscover(wg) + err = cluster.TopologyDiscover(wg) + if err != nil { + cluster.LogPrintf(LvlWarn, "AddChildServers : Fail to discover a topology %s", err) + } wg.Wait() return nil // leave for next monitor loop to remove the sever if no more link diff --git a/config/config.go b/config/config.go index c29f8cea1..2660fb5a9 100644 --- a/config/config.go +++ 
b/config/config.go @@ -532,6 +532,8 @@ type Config struct { VaultMode string `mapstructure:"vault-mode" toml:"vault-mode" json:"vaultMode"` VaultMount string `mapstructure:"vault-mount" toml:"vault-mount" json:"vaultMount"` VaultAuth string `mapstructure:"vault-auth" toml:"vault-auth" json:"vaultAuth"` + GitUrl string `mapstructure:"git-url" toml:"git-url" json:"gitUrl"` + GitAccesToken string `mapstructure:"git-acces-token" toml:"git-acces-token" json:"gitAccesToken"` // BackupResticStoragePolicy string `mapstructure:"backup-restic-storage-policy" toml:"backup-restic-storage-policy" json:"backupResticStoragePolicy"` //ProvMode string `mapstructure:"prov-mode" toml:"prov-mode" json:"provMode"` //InitContainer vs API diff --git a/go.mod b/go.mod index 417b22a1f..8a7160320 100644 --- a/go.mod +++ b/go.mod @@ -81,7 +81,7 @@ require ( github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f github.com/xwb1989/sqlparser v0.0.0-20171128062118-da747e0c62c4 github.com/yoheimuta/protolint v0.32.0 - golang.org/x/crypto v0.5.0 + golang.org/x/crypto v0.6.0 golang.org/x/net v0.7.0 google.golang.org/genproto v0.0.0-20210617175327-b9e0b3197ced google.golang.org/grpc v1.38.0 @@ -113,6 +113,8 @@ require ( github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239 // indirect github.com/gin-gonic/gin v1.7.2 // indirect + github.com/go-git/go-git v4.7.0+incompatible // indirect + github.com/go-git/go-git/v5 v5.6.1 // indirect github.com/go-ole/go-ole v1.2.5 // indirect github.com/gonum/blas v0.0.0-20180125090452-e7c5890b24cf // indirect github.com/gonum/floats v0.0.0-20180125090339-7de1f4ea7ab5 // indirect @@ -124,7 +126,6 @@ require ( github.com/gregdel/pushover v1.1.0 github.com/hashicorp/vault/api v1.9.0 // indirect github.com/hashicorp/vault/api/auth/approle v0.4.0 // indirect - github.com/imdario/mergo v0.3.11 // indirect github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869 // indirect github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 // indirect github.com/lestrrat/go-envload v0.0.0-20180220120943-6ed08b54a570 // indirect @@ -136,5 +137,6 @@ require ( github.com/siddontang/go-log v0.0.0-20190221022429-1e957dd83bed // indirect github.com/smartystreets/goconvey v1.7.2 // indirect google.golang.org/grpc/examples v0.0.0-20220316190256-c4cabf78f4a2 // indirect + gopkg.in/src-d/go-git.v4 v4.13.1 // indirect nhooyr.io/websocket v1.8.7 // indirect ) diff --git a/go.sum b/go.sum index b0e2dd787..6159c0a89 100644 --- a/go.sum +++ b/go.sum @@ -70,20 +70,29 @@ github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1M github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/JaderDias/movingmedian v0.0.0-20170611140316-de8c410559fa h1:bV0zbEchxY6+/yBbwqBAtdLyCPRDJtkp0qRRaK2BseI= github.com/JaderDias/movingmedian v0.0.0-20170611140316-de8c410559fa/go.mod h1:zsfWLaDctbM7aV1TsQAwkVswuKQ0k7PK4rjC1VZqpbI= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.0.1 h1:iLrQrdwjDd52kHDA5op2UBJFjmOb9g+7scBan4RN8F0= github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY= github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/aclements/go-moremath v0.0.0-20170210193428-033754ab1fee h1:U/M5WeoRJXGbprTIaGaw8egvYgNU8eXlS727Y4QM1tA= github.com/aclements/go-moremath v0.0.0-20170210193428-033754ab1fee/go.mod h1:idZL3yvz4kzx1dsBOAC+oYv6L92P1oFEhUXUB1A/lwQ= +github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= +github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alyu/configparser v0.0.0-20151125021232-26b2fe18bee1 h1:1Gx9bRdpjHB117HvjqEhUJpc47jWVnQCyCv4YfLsBjo= github.com/alyu/configparser v0.0.0-20151125021232-26b2fe18bee1/go.mod h1:AQsRkKr3LShUSgddjIcPP5axBgCGGegOiMu9nHAlqJw= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -91,6 +100,7 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.29.24 h1:KOnds/LwADMDBaALL4UB98ZR+TUR1A1mYmAYbdLixLA= @@ -104,6 +114,7 @@ github.com/bluele/slack v0.0.0-20180528010058-b4b4d354a079 h1:dm7wU6Dyf+rVGryOAB github.com/bluele/slack v0.0.0-20180528010058-b4b4d354a079/go.mod h1:W679Ri2W93VLD8cVpEY/zLH1ow4zhJcCyjzrKxfM3QM= github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d h1:7IjN4QP3c38xhg6wz8R3YjoU+6S9e7xBc0DAVLLIpHE= github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod 
h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= +github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -112,6 +123,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/codegangsta/negroni v0.3.0 h1:ByBtJaE0u71x6Ebli7lm95c8oCkrmF88+s5qB2o6j8I= @@ -126,6 +139,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dasrick/go-teams-notify/v2 v2.1.0 h1:CSleKfkvrw2O9QmSY/LMHcg5hotuYnV+fftlHk8llRo= github.com/dasrick/go-teams-notify/v2 v2.1.0/go.mod h1:6TLarJg4hBXOybLxZpBvKIqeZiUZqUOM5SS2DLtUjTM= @@ -156,6 +170,9 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -189,6 +206,7 @@ github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239 h1:Ghm4eQYC0nEPnSJ github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239/go.mod h1:Gdwt2ce0yfBxPvZrHkprdPPTTS3N5rwmLE8T22KBXlw= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn/go-shlex 
v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -204,6 +222,18 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.2 h1:Tg03T9yM2xa8j6I3Z3oqLaQRSmKvxPd6g/2HJ6zICFA= github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= +github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= +github.com/go-git/go-git v4.7.0+incompatible h1:+W9rgGY4DOKKdX2x6HxSR7HNeTxqiKrOvKnuittYVdA= +github.com/go-git/go-git v4.7.0+incompatible/go.mod h1:6+421e08gnZWn30y26Vchf7efgYLe4dl5OQbBSUXShE= +github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo= +github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk= +github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC2MDs4ee8= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -303,6 +333,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -416,6 +447,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/improbable-eng/grpc-web v0.14.0 h1:GdoK+cXABdB+1keuqsV1drSFO2XLYIxqt/4Rj8SWGBk= github.com/improbable-eng/grpc-web v0.14.0/go.mod 
h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -430,8 +463,12 @@ github.com/jacobsa/ogletest v0.0.0-20170503003838-80d50a735a11/go.mod h1:+DBdDyf github.com/jacobsa/reqtrace v0.0.0-20150505043853-245c9e0234cb/go.mod h1:ivcmUvxXWjb27NsPEaiYK7AidlZXS7oQ5PowUS9z3I4= github.com/jacobsa/syncutil v0.0.0-20180201203307-228ac8e5a6c3/go.mod h1:mPvulh9VKXvo+yOlrD4VYOOYuLdZJ36wa/5QIrtXvWs= github.com/jacobsa/timeutil v0.0.0-20170205232429-577e5acbbcf6/go.mod h1:JEWKD6V8xETMW+DEv+IQVz++f8Cn8O/X0HPeDY3qNis= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869 h1:IPJ3dvxmJ4uczJe5YQdrYB16oTJlGSC/OyZDqUk9xX4= github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= @@ -484,6 +521,9 @@ github.com/juju/version v0.0.0-20191219164919-81c1be00b9a6/go.mod h1:kE8gK5X0CIm github.com/juju/version/v2 v2.0.0-20211007103408-2e8da085dc23/go.mod h1:Ljlbryh9sYaUSGXucslAEDf0A2XUSGvDbHJgW8ps6nc= github.com/julienschmidt/httprouter v1.1.1-0.20151013225520-77a895ad01eb/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -500,6 +540,7 @@ github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -527,6 +568,7 @@ github.com/masterzen/azure-sdk-for-go v3.2.0-beta.0.20161014135628-ee4f0065d00c+ github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= github.com/masterzen/winrm v0.0.0-20161014151040-7a535cd943fc/go.mod 
h1:CfZSN7zwz5gJiFhZJz49Uzk7mEBHIceWmbFmYx7Hf7E= github.com/masterzen/xmlpath v0.0.0-20140218185901-13f4951698ad/go.mod h1:A0zPC53iKKKcXYxr4ROjpQRQ5FgJXtelNdSmHHuq/tY= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.6/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -591,6 +633,7 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mjibson/go-dsp v0.0.0-20170104183934-49dba8372707 h1:onb6q16M5ft2+b3zu6YCkMRaWONPM5ofIIZI0OZWtps= github.com/mjibson/go-dsp v0.0.0-20170104183934-49dba8372707/go.mod h1:i/KKcxEWEO8Yyl11DYafRPKOPVYTrhxiTRigjtEEXZU= +github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -601,6 +644,7 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/termbox-go v0.0.0-20180129072728-88b7b944be8b h1:juxXUBpBuF6yPbNHz/8Rv997YZIkeDy4AMb9P7xIqOc= github.com/nsf/termbox-go v0.0.0-20180129072728-88b7b944be8b/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= @@ -614,6 +658,7 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -634,6 +679,8 @@ github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfE github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306103835-530c669f7112+incompatible h1:RYNZrH30AoPmXBJhpFNB6UYE1ivwApG3VfScyQm3opA= github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306103835-530c669f7112+incompatible/go.mod 
h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -670,6 +717,9 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v2.20.2+incompatible h1:ucK79BhBpgqQxPASyS2cu9HX8cfDVljBN1WWFvbNvgY= github.com/shirou/gopsutil v2.20.2+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= @@ -689,6 +739,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= +github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= @@ -711,6 +763,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -733,6 +787,9 @@ github.com/urfave/cli v1.22.3 h1:FpNT6zq26xNpHZy08emi755QwzLPs6Pukqjlc7RfOMU= github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f h1:9DDCDwOyEy/gId+IEMrFHLuQ5R/WV0KNxWLler8X2OY= github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f/go.mod h1:8sdOQnirw1PrcnTJYkmW1iOHtUmblMmGdUOHyWYycLI= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xanzy/ssh-agent v0.3.3 
h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xwb1989/sqlparser v0.0.0-20171128062118-da747e0c62c4 h1:w96oitIHwAbUymu2zUSla/82gOKNzpJYkFdwCHE/UOA= @@ -766,26 +823,34 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20180214000028-650f4a345ab4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto 
v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -821,6 +886,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -862,9 +929,12 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -896,6 +966,7 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222171317-cd391775e71e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -932,19 +1003,28 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= @@ -956,6 +1036,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -983,6 +1064,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools 
v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1018,6 +1100,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1118,6 +1202,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v1 v1.0.0-20161222125816-442357a80af5/go.mod h1:u0ALmqvLRxLI95fkdCEWrE6mhWYZW1aMOJHp5YXLHTg= @@ -1136,19 +1221,29 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= 
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170712054546-1be3d31502d6/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1175,6 +1270,7 @@ launchpad.net/xmlpath v0.0.0-20130614043138-000000000004/go.mod h1:vqyExLOM3qBx7 nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= diff --git a/server/api_cluster.go b/server/api_cluster.go index 3f13f68f3..a9613107f 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -8,6 +8,7 @@ package server import ( "encoding/json" + "fmt" "io" "net/http" "strconv" @@ -795,7 +796,8 @@ func (repman *ReplicationManager) handlerMuxSwitchover(w http.ResponseWriter, r mycluster.LogPrintf(cluster.LvlInfo, "Prefered master: not found in database servers %s", newPrefMaster) } mycluster.MasterFailover(false) - mycluster.SetPrefMaster(savedPrefMaster) + mycluster.Conf.PrefMaster = (savedPrefMaster) + } else { http.Error(w, "No cluster", 500) return @@ -1459,6 +1461,7 @@ func (repman *ReplicationManager) handlerMuxSettingsReload(w http.ResponseWriter } func (repman *ReplicationManager) handlerMuxServerAdd(w http.ResponseWriter, r *http.Request) { + fmt.Printf("HANDLER MUX SERVER ADD\n") w.Header().Set("Access-Control-Allow-Origin", "*") vars := mux.Vars(r) mycluster := repman.getClusterByName(vars["clusterName"]) diff --git a/server/server.go b/server/server.go index 367b718dd..7715644fa 100644 --- a/server/server.go +++ b/server/server.go @@ -459,6 +459,7 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { if strClusters != "" { //set cluster list repman.ClusterList = 
strings.Split(strClusters, ",") + repman.ImmuableFlagMaps["default"] = ImmuableMap //add config from cluster to the config map for _, cluster := range repman.ClusterList { //vipersave := backupvipersave @@ -513,7 +514,6 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab clusterconf := conf //conf.PrintConf() - //fmt.Printf("%+v\n", fistRead.AllSettings()) //if name cluster is defined if cluster != "" { @@ -529,15 +529,16 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab cf2.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_")) repman.initAlias(cf2) cf2.Unmarshal(&clusterconf) - } + //fmt.Printf("saved conf :") + //clusterconf.PrintConf() + //Add immuatable flag from cluster section + for _, f := range cf2.AllKeys() { + v := cf2.Get(f) + if v != nil { + clustImmuableMap[f] = v + } - //Add immuatable flag from cluster section - for _, f := range cf2.AllKeys() { - v := cf2.Get(f) - if v != nil { - clustImmuableMap[f] = v } - } //clusterconf.PrintConf() @@ -550,13 +551,22 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab confs.ConfImmuable = clusterconf //fmt.Printf("%+v\n", cf2.AllSettings()) - + repman.DynamicFlagMaps["default"] = clustDynamicMap //if dynamic config, load modified parameter from the saved config if clusterconf.ConfRewrite { + cf3 := fistRead.Sub("saved-" + cluster) + + //cf4 := repman.CleanupDynamicConfig(clustImmuableMap, cf3) if cf3 == nil { log.WithField("group", cluster).Info("Could not parse saved configuration group") } else { + for _, f := range cf3.AllKeys() { + v, ok := clustImmuableMap[f] + if ok { + cf3.Set(f, v) + } + } repman.initAlias(cf3) cf3.Unmarshal(&clusterconf) //to add flag in cluster dynamic map only if not defined yet or if the flag value read is diff from immuable flag value @@ -579,13 +589,29 @@ func (repman *ReplicationManager) GetClusterConfig(fistRead *viper.Viper, Immuab } repman.DynamicFlagMaps[cluster] = clustDynamicMap + confs.ConfInit = clusterconf + //fmt.Printf("init conf : ") + //clusterconf.PrintConf() repman.VersionConfs[cluster] = confs } return clusterconf } +/* +func CleanupDynamicConfig(clustImmuableMap map[string]interface{}, cf viper.Viper, cluster string) viper.Viper { + //if admin change immuable value that is already saved in dynamic config, we need to remove it before parse + for _, f := range cf.AllKeys() { + _, ok := clustImmuableMap[f] + if ok { + delete(cf.Get(f).(map[string]interface{}), f) + } + + } + +}*/ + func (repman *ReplicationManager) initAlias(v *viper.Viper) { v.RegisterAlias("monitoring-config-rewrite", "monitoring-save-config") v.RegisterAlias("api-user", "api-credentials") diff --git a/server/server_add.go b/server/server_add.go index 0fc0fcfe8..a63128d08 100644 --- a/server/server_add.go +++ b/server/server_add.go @@ -6,9 +6,14 @@ package server -import "github.com/signal18/replication-manager/config" +import ( + "fmt" + + "github.com/signal18/replication-manager/config" +) func (repman *ReplicationManager) AddCluster(clusterName string, clusterHead string) error { + fmt.Printf("ADD CLUSTER\n") var myconf = make(map[string]config.Config) myconf[clusterName] = repman.Conf @@ -16,6 +21,13 @@ func (repman *ReplicationManager) AddCluster(clusterName string, clusterHead str repman.ClusterList = append(repman.ClusterList, clusterName) //repman.ClusterList = repman.ClusterList repman.Confs[clusterName] = repman.Conf + + repman.VersionConfs[clusterName] = new(config.ConfVersion) + 
repman.VersionConfs[clusterName].ConfInit = repman.Conf + + repman.ImmuableFlagMaps[clusterName] = repman.ImmuableFlagMaps["default"] + repman.DynamicFlagMaps[clusterName] = repman.DynamicFlagMaps["default"] + repman.Unlock() /*file, err := os.OpenFile(repman.Conf.ClusterConfigPath+"/"+clusterName+".toml", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) if err != nil { @@ -29,9 +41,14 @@ func (repman *ReplicationManager) AddCluster(clusterName string, clusterHead str if err != nil { return err }*/ + //confs[clusterName] = repman.GetClusterConfig(fistRead, repman.ImmuableFlagMaps["default"], repman.DynamicFlagMaps["default"], clusterName, conf) cluster, _ := repman.StartCluster(clusterName) + fmt.Printf("ADD CLUSTER CONF :\n") + cluster.Conf.PrintConf() + cluster.SetClusterHead(clusterHead) + //cluster.SetClusterHead(clusterName) cluster.SetClusterList(repman.Clusters) cluster.Save() diff --git a/server/server_monitor.go b/server/server_monitor.go index cab8055f0..92e6a9f13 100644 --- a/server/server_monitor.go +++ b/server/server_monitor.go @@ -58,9 +58,10 @@ func init() { } monitorCmd.Flags().StringVar(&conf.WorkingDir, "monitoring-datadir", "/var/lib/replication-manager", "Path to write temporary and persistent files") monitorCmd.Flags().Int64Var(&conf.MonitoringTicker, "monitoring-ticker", 2, "Monitoring interval in seconds") - monitorCmd.Flags().StringVar(&conf.TunnelHost, "monitoring-tunnel-host", "", "Bastion host to access to monitor topology via SSH tunnel host:22") - monitorCmd.Flags().StringVar(&conf.TunnelCredential, "monitoring-tunnel-credential", "root:", "Credential Access to bastion host topology via SSH tunnel") - monitorCmd.Flags().StringVar(&conf.TunnelKeyPath, "monitoring-tunnel-key-path", "/Users/apple/.ssh/id_rsa", "Tunnel private key path") + //not working so far + //monitorCmd.Flags().StringVar(&conf.TunnelHost, "monitoring-tunnel-host", "", "Bastion host to access to monitor topology via SSH tunnel host:22") + //monitorCmd.Flags().StringVar(&conf.TunnelCredential, "monitoring-tunnel-credential", "root:", "Credential Access to bastion host topology via SSH tunnel") + //monitorCmd.Flags().StringVar(&conf.TunnelKeyPath, "monitoring-tunnel-key-path", "/Users/apple/.ssh/id_rsa", "Tunnel private key path") monitorCmd.Flags().BoolVar(&conf.MonitorWriteHeartbeat, "monitoring-write-heartbeat", false, "Inject heartbeat into proxy or via external vip") monitorCmd.Flags().BoolVar(&conf.ConfRewrite, "monitoring-save-config", false, "Save configuration changes to / ") monitorCmd.Flags().StringVar(&conf.MonitorWriteHeartbeatCredential, "monitoring-write-heartbeat-credential", "", "Database user:password to inject traffic into proxy or via external vip") @@ -213,6 +214,9 @@ func init() { monitorCmd.Flags().StringVar(&conf.VaultMount, "vault-mount", "secret", "Vault mount for the secret") monitorCmd.Flags().StringVar(&conf.VaultAuth, "vault-auth", "approle", "Vault auth method : approle|userpass|ldap|token|github|alicloud|aws|azure|gcp|kerberos|kubernetes|radius") + monitorCmd.Flags().StringVar(&conf.GitUrl, "git-url", "", "GitHub URL repository to store config file") + monitorCmd.Flags().StringVar(&conf.GitAccesToken, "git-acces-token", "", "GitHub personnal acces token") + //monitorCmd.Flags().BoolVar(&conf.Daemon, "daemon", true, "Daemon mode. 
Do not start the Termbox console") conf.Daemon = true @@ -371,7 +375,7 @@ func init() { monitorCmd.Flags().BoolVar(&conf.BackupStreaming, "backup-streaming", false, "Backup streaming to cloud ") monitorCmd.Flags().BoolVar(&conf.BackupStreamingDebug, "backup-streaming-debug", false, "Debug mode for streaming to cloud ") monitorCmd.Flags().StringVar(&conf.BackupStreamingAwsAccessKeyId, "backup-streaming-aws-access-key-id", "admin", "Backup AWS key id") - monitorCmd.Flags().StringVar(&conf.BackupStreamingAwsAccessSecret, "backup-streaming-aws-access-secret", "secret", "Backup AWS key sercret") + monitorCmd.Flags().StringVar(&conf.BackupStreamingAwsAccessSecret, "backup-streaming-aws-access-secret", "secret", "Backup AWS key secret") monitorCmd.Flags().StringVar(&conf.BackupStreamingEndpoint, "backup-streaming-endpoint", "https://s3.signal18.io/", "Backup AWS endpoint") monitorCmd.Flags().StringVar(&conf.BackupStreamingRegion, "backup-streaming-region", "fr-1", "Backup AWS region") monitorCmd.Flags().StringVar(&conf.BackupStreamingBucket, "backup-streaming-bucket", "repman", "Backup AWS bucket") diff --git a/utils/crypto/crypto.go b/utils/crypto/crypto.go index 3649875f2..d524f82a8 100644 --- a/utils/crypto/crypto.go +++ b/utils/crypto/crypto.go @@ -53,7 +53,6 @@ func (p *Password) Encrypt() { } func (p *Password) Decrypt() error { - log.Println("COUCOU pass is: %s", p.CipherText) ciphertext, _ := hex.DecodeString(p.CipherText) block, err := aes.NewCipher(p.Key) From 72807013c1f46b4364a84eef28685e034da8a046 Mon Sep 17 00:00:00 2001 From: emma Date: Thu, 20 Apr 2023 09:54:26 +0200 Subject: [PATCH 16/39] fix isMasterFailed function --- cluster/cluster.go | 2 +- cluster/cluster_chk.go | 2 +- server/api_cluster.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 0ccec7976..f47ab74f7 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -611,7 +611,7 @@ func (cluster *Cluster) Run() { err := cluster.AddChildServers() if err != nil { - cluster.LogPrintf(LvlInfo, "Fail of AddChildServers %s", err) + cluster.LogPrintf(LvlDbg, "Fail of AddChildServers %s", err) } cluster.IsFailable = cluster.GetStatus() diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index 631b6c2f6..8727cb829 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -121,7 +121,7 @@ func (cluster *Cluster) isAutomaticFailover() bool { func (cluster *Cluster) isMasterFailed() bool { if cluster.master == nil { - return true + return false } if cluster.master.State == stateFailed { return true diff --git a/server/api_cluster.go b/server/api_cluster.go index a9613107f..fd16d29c5 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -796,7 +796,7 @@ func (repman *ReplicationManager) handlerMuxSwitchover(w http.ResponseWriter, r mycluster.LogPrintf(cluster.LvlInfo, "Prefered master: not found in database servers %s", newPrefMaster) } mycluster.MasterFailover(false) - mycluster.Conf.PrefMaster = (savedPrefMaster) + mycluster.SetPrefMaster(savedPrefMaster) } else { http.Error(w, "No cluster", 500) From 39ea941022e26908ed2ee85c0fd0ecf9c5507cc1 Mon Sep 17 00:00:00 2001 From: emma Date: Wed, 26 Apr 2023 10:52:27 +0200 Subject: [PATCH 17/39] fix local orchestrator mode --- cluster/cluster.go | 152 +++++++++++++++++++-- cluster/cluster_has.go | 19 ++- cluster/cluster_wait.go | 3 + cluster/prov.go | 20 +-- cluster/prov_localhost_db.go | 11 +- cluster/prov_localhost_proxysql.go | 26 +++- cluster/prov_localhost_prx.go | 4 +- 
cluster/prx_mariadbshardproxy.go | 1 + cluster/prx_proxysql.go | 18 ++- cluster/srv.go | 4 + cluster/srv_set.go | 5 + etc/config.toml | 80 ++--------- etc/local/masterslave/proxysql/config.toml | 7 +- router/proxysql/proxysql.go | 6 +- server/server.go | 11 +- server/server_add.go | 8 +- server/server_monitor.go | 2 +- 17 files changed, 266 insertions(+), 111 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index f47ab74f7..f1687df78 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -466,7 +466,7 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface cluster.AddDBTagConfig("pkg") } //fmt.Printf("INIT CLUSTER CONF :\n") - cluster.Conf.PrintConf() + //cluster.Conf.PrintConf() return nil } @@ -538,13 +538,17 @@ func (cluster *Cluster) Run() { default: if cluster.Conf.LogLevel > 2 { cluster.LogPrintf(LvlDbg, "Monitoring server loop") - for k, v := range cluster.Servers { - cluster.LogPrintf(LvlDbg, "Server [%d]: URL: %-15s State: %6s PrevState: %6s", k, v.URL, v.State, v.PrevState) - } - if cluster.GetMaster() != nil { - cluster.LogPrintf(LvlDbg, "Master [ ]: URL: %-15s State: %6s PrevState: %6s", cluster.master.URL, cluster.GetMaster().State, cluster.GetMaster().PrevState) - for k, v := range cluster.slaves { - cluster.LogPrintf(LvlDbg, "Slave [%d]: URL: %-15s State: %6s PrevState: %6s", k, v.URL, v.State, v.PrevState) + if cluster.Servers[0] != nil { + cluster.LogPrintf(LvlDbg, "Servers not nil : %v\n", cluster.Servers) + for k, v := range cluster.Servers { + cluster.LogPrintf(LvlDbg, "Servers loops k : %d, url : %s, state : %s, prevstate %s", k, v.URL, v.State, v.PrevState) + cluster.LogPrintf(LvlDbg, "Server [%d]: URL: %-15s State: %6s PrevState: %6s", k, v.URL, v.State, v.PrevState) + } + if cluster.GetMaster() != nil { + cluster.LogPrintf(LvlDbg, "Master [ ]: URL: %-15s State: %6s PrevState: %6s", cluster.master.URL, cluster.GetMaster().State, cluster.GetMaster().PrevState) + for k, v := range cluster.slaves { + cluster.LogPrintf(LvlDbg, "Slave [%d]: URL: %-15s State: %6s PrevState: %6s", k, v.URL, v.State, v.PrevState) + } } } } @@ -913,9 +917,10 @@ func (cluster *Cluster) Save() error { } } - file.WriteString("[saved-" + cluster.Name + "]\n") + file.WriteString("[saved-" + cluster.Name + "]\ntitle = \"" + cluster.Name + "\" \n") s.WriteTo(file) - fmt.Printf("SAVE CLUSTER IMMUABLE MAP : %s", cluster.ImmuableFlagMap) + //fmt.Printf("SAVE CLUSTER IMMUABLE MAP : %s", cluster.ImmuableFlagMap) + //fmt.Printf("SAVE CLUSTER DYNAMIC MAP : %s", cluster.DynamicFlagMap) //to load the new generated config file in github if cluster.Conf.GitUrl != "" { @@ -963,6 +968,75 @@ func (cluster *Cluster) Save() error { return nil } +func (cluster *Cluster) SaveClusterFromScratch() error { + var myconf = make(map[string]config.Config) + + myconf[cluster.Name] = cluster.Conf + + file, err := os.OpenFile(cluster.Conf.WorkingDir+"/"+cluster.Name+"/"+cluster.Name+".toml", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) + if err != nil { + if os.IsPermission(err) { + cluster.LogPrintf(LvlInfo, "File permission denied: %s", cluster.Conf.WorkingDir+"/"+cluster.Name+"/"+cluster.Name+".toml") + } + return err + } + defer file.Close() + + readconf, _ := toml.Marshal(cluster.Conf) + t, _ := toml.LoadBytes(readconf) + s := t + keys := t.Keys() + for _, key := range keys { + _, ok := cluster.ImmuableFlagMap[key] + if !ok { + s.Delete(key) + } else { + v, ok := cluster.DefaultFlagMap[key] + if ok && fmt.Sprintf("%v", s.Get(key)) == fmt.Sprintf("%v", v) { + s.Delete(key) + } + 
} + } + for _, key := range keys { + + _, ok := encryptFlag[key] + if ok { + v := s.Get(key) + str := fmt.Sprintf("%v", v) + tmp := strings.Split(str, ":") + if len(tmp) == 2 { + str = tmp[1] + v = tmp[0] + } + + p := crypto.Password{PlainText: str} + var err error + key_path, ok := cluster.ImmuableFlagMap["monitoring-key-path"] + if ok { + p.Key, err = crypto.ReadKey(fmt.Sprintf("%v", key_path)) + if err != nil { + log.Fatalln(err) + } + p.Encrypt() + + if len(tmp) == 2 { + str = fmt.Sprintf("%v", v) + str = str + p.CipherText + v = str + } else { + v = p.CipherText + } + } else { + cluster.LogPrintf(LvlWarn, "Missing key file or wrong key path") + } + + } + } + file.WriteString("[" + cluster.Name + "]\n title = \"" + cluster.Name + " \" \n") + s.WriteTo(file) + return nil +} + func (cluster *Cluster) Overwrite() error { if cluster.Conf.ConfRewrite { @@ -1017,7 +1091,7 @@ func (cluster *Cluster) InitAgent(conf config.Config) { return } -func (cluster *Cluster) ReloadConfig(conf config.Config) { +/*func (cluster *Cluster) ReloadConfig(conf config.Config) { cluster.Conf = conf cluster.Configurator.SetConfig(conf) cluster.sme.SetFailoverState() @@ -1032,6 +1106,19 @@ func (cluster *Cluster) ReloadConfig(conf config.Config) { cluster.newProxyList() cluster.sme.RemoveFailoverState() cluster.initProxies() +}*/ + +func (cluster *Cluster) ReloadConfig(conf config.Config) { + cluster.Conf = conf + cluster.Configurator.SetConfig(conf) + cluster.sme.SetFailoverState() + cluster.ResetStates() + + wg := new(sync.WaitGroup) + wg.Add(1) + go cluster.TopologyDiscover(wg) + wg.Wait() + } func (cluster *Cluster) FailoverForce() error { @@ -1401,3 +1488,46 @@ func (cluster *Cluster) ReloadCertificates() { pri.CertificatesReload() } } + +func (cluster *Cluster) ResetStates() { + cluster.slaves = nil + cluster.master = nil + cluster.oldMaster = nil + cluster.vmaster = nil + cluster.Servers = nil + cluster.Proxies = nil + // + cluster.ServerIdList = nil + cluster.hostList = nil + cluster.clusterList = nil + cluster.proxyList = nil + cluster.ProxyIdList = nil + //cluster.FailoverCtr = 0 + cluster.SetFailoverCtr(0) + //cluster.FailoverTs = 0 + cluster.SetFailTime(0) + cluster.Connections = 0 + cluster.SLAHistory = nil + // + cluster.Crashes = nil + + cluster.IsAllDbUp = false + cluster.IsDown = true + cluster.IsClusterDown = true + cluster.IsProvision = false + cluster.IsNotMonitoring = true + cluster.Topology = topoUnknown + + cluster.canFlashBack = true + cluster.CanInitNodes = true + cluster.CanConnectVault = true + cluster.runOnceAfterTopology = true + cluster.testStopCluster = true + cluster.testStartCluster = true + + cluster.SetUnDiscovered() + cluster.newServerList() + cluster.newProxyList() + cluster.sme.RemoveFailoverState() + cluster.initProxies() +} diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 05b254b61..2a121ee04 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -150,7 +150,7 @@ func (cluster *Cluster) HasAllDbUp() bool { } for _, s := range cluster.Servers { if s != nil { - if s.State == stateFailed /*&& misc.Contains(cluster.ignoreList, s.URL) == false*/ { + if s.State == stateFailed || s.State == stateErrorAuth /*&& misc.Contains(cluster.ignoreList, s.URL) == false*/ { return false } if s.State == stateSuspect && cluster.GetTopology() != topoUnknown { @@ -161,6 +161,23 @@ func (cluster *Cluster) HasAllDbUp() bool { if s.Conn == nil { return false } + + } + } + + return true +} + +func (cluster *Cluster) HasAllDbDown() bool { + if cluster.Servers == nil { + 
return true + } + for _, s := range cluster.Servers { + if s != nil { + if s.State != stateFailed /*&& misc.Contains(cluster.ignoreList, s.URL) == false*/ { + return false + } + } } diff --git a/cluster/cluster_wait.go b/cluster/cluster_wait.go index 4f16c84ef..b42ba5bf6 100644 --- a/cluster/cluster_wait.go +++ b/cluster/cluster_wait.go @@ -124,6 +124,9 @@ func (cluster *Cluster) WaitClusterStop() error { if cluster.sme.IsInState("ERR00021") == true { exitloop = 9999999 } + if cluster.HasAllDbDown() { + exitloop = 9999999 + } } } diff --git a/cluster/prov.go b/cluster/prov.go index e0bfdeb7e..3f96c81b4 100644 --- a/cluster/prov.go +++ b/cluster/prov.go @@ -24,10 +24,6 @@ func (cluster *Cluster) Bootstrap() error { if err != nil { return err } - err = cluster.WaitDatabaseCanConn() - if err != nil { - return err - } err = cluster.BootstrapReplication(true) if err != nil { @@ -99,7 +95,10 @@ func (cluster *Cluster) ProvisionServices() error { } } } - + err := cluster.WaitDatabaseCanConn() + if err != nil { + return err + } for _, prx := range cluster.Proxies { switch cluster.GetOrchestrator() { case config.ConstOrchestratorOpenSVC: @@ -230,11 +229,12 @@ func (cluster *Cluster) Unprovision() error { } } } - cluster.slaves = nil - cluster.master = nil - cluster.vmaster = nil - cluster.IsAllDbUp = false - cluster.sme.RemoveFailoverState() + err := cluster.WaitClusterStop() + if err == nil { + cluster.ResetStates() + } else { + cluster.LogPrintf(LvlErr, "Failed to wait for all databases down : %s", err) + } for _, prx := range cluster.Proxies { /* prx, ok := pri.(*Proxy) diff --git a/cluster/prov_localhost_db.go b/cluster/prov_localhost_db.go index 760dde617..775b310d7 100644 --- a/cluster/prov_localhost_db.go +++ b/cluster/prov_localhost_db.go @@ -31,6 +31,7 @@ func (cluster *Cluster) LocalhostUnprovisionDatabaseService(server *ServerMonito return err } cluster.LogPrintf(LvlInfo, "Remove datadir done: %s", out.Bytes()) + cluster.master = nil cluster.errorChan <- nil return nil } @@ -173,7 +174,7 @@ func (cluster *Cluster) LocalhostStartDatabaseServiceFistTime(server *ServerMoni return errors.New("mysqld --version not found ") } time.Sleep(time.Millisecond * 2000) - if !strings.Contains(version, "mariadb") { + if strings.Contains(version, "mariadb") { user = "root" } mariadbdCmd := exec.Command(cluster.Conf.ProvDBBinaryBasedir+"/mysqld", "--defaults-file="+server.Datadir+"/init/etc/mysql/my.cnf", "--port="+server.Port, "--server-id="+server.Port, "--datadir="+path, "--socket="+server.GetDatabaseSocket(), "--user="+user, "--bind-address=0.0.0.0", "--pid_file="+path+"/"+server.Id+".pid") @@ -203,6 +204,7 @@ func (cluster *Cluster) LocalhostStartDatabaseServiceFistTime(server *ServerMoni } dsn := user + ":@unix(" + server.GetDatabaseSocket() + ")/?timeout=15s" conn, err2 := sqlx.Open("mysql", dsn) + cluster.LogPrintf(LvlDbg, "DNS start prov localhost first time : %s\n", dsn) if err2 == nil { defer conn.Close() _, err := conn.Exec("set sql_log_bin=0") @@ -262,9 +264,16 @@ func (cluster *Cluster) LocalhostStartDatabaseServiceFistTime(server *ServerMoni cluster.LogPrintf(LvlErr, " %s %s ", "flush privileges", err) } + _, err = conn.Exec("reset master") + if err != nil { + haveerror = true + cluster.LogPrintf(LvlErr, " %s %s ", "reset master", err) + } + if !haveerror { exitloop = 100 } + } else { cluster.LogPrintf(LvlErr, "Database connection to init user %s ", err2) } diff --git a/cluster/prov_localhost_proxysql.go b/cluster/prov_localhost_proxysql.go index de9cfc337..76f066ec8 100644 --- 
a/cluster/prov_localhost_proxysql.go +++ b/cluster/prov_localhost_proxysql.go @@ -17,7 +17,23 @@ import ( // TODO: Make all of these part of ProxySQLProxy and not Cluster func (cluster *Cluster) LocalhostUnprovisionProxySQLService(prx *ProxySQLProxy) error { - cluster.LocalhostStopProxysqlService(prx) + cluster.LocalhostStopProxySQLService(prx) + + out := &bytes.Buffer{} + path := prx.Datadir //+ "/var" + //os.RemoveAll(path) + + cmd := exec.Command("rm", "-rf", path) + + cmd.Stdout = out + err := cmd.Run() + if err != nil { + cluster.LogPrintf(LvlErr, "%s", err) + cluster.errorChan <- err + return err + } + cluster.LogPrintf(LvlInfo, "Remove datadir done: %s", out.Bytes()) + cluster.errorChan <- nil return nil } @@ -51,12 +67,12 @@ func (cluster *Cluster) LocalhostProvisionProxySQLService(prx *ProxySQLProxy) er return nil } -func (cluster *Cluster) LocalhostStopProxysqlService(prx *ProxySQLProxy) error { +func (cluster *Cluster) LocalhostStopProxySQLService(prx *ProxySQLProxy) error { // cluster.LogPrintf("TEST", "Killing database %s %d", server.Id, server.Process.Pid) - - killCmd := exec.Command("kill", "-9", fmt.Sprintf("%d", prx.Process.Pid)) - killCmd.Run() + prx.Shutdown() + //killCmd := exec.Command("kill", "-9", fmt.Sprintf("%d", prx.Process.Pid)) + //killCmd.Run() return nil } diff --git a/cluster/prov_localhost_prx.go b/cluster/prov_localhost_prx.go index 65128820a..12cc17ced 100644 --- a/cluster/prov_localhost_prx.go +++ b/cluster/prov_localhost_prx.go @@ -85,10 +85,10 @@ func (cluster *Cluster) LocalhostStartProxyService(pri DatabaseProxy) error { func (cluster *Cluster) LocalhostStopProxyService(pri DatabaseProxy) error { if prx, ok := pri.(*HaproxyProxy); ok { - cluster.LocalhostStartHaProxyService(prx) + cluster.LocalhostStopHaProxyService(prx) } if prx, ok := pri.(*ProxySQLProxy); ok { - cluster.LocalhostStartProxySQLService(prx) + cluster.LocalhostStopProxySQLService(prx) } return nil diff --git a/cluster/prx_mariadbshardproxy.go b/cluster/prx_mariadbshardproxy.go index 328b5b8c8..93006e5c8 100644 --- a/cluster/prx_mariadbshardproxy.go +++ b/cluster/prx_mariadbshardproxy.go @@ -212,6 +212,7 @@ func (proxy *MariadbShardProxy) Refresh() error { wg.Add(1) go proxy.ShardProxy.Ping(wg) wg.Wait() + err := proxy.Refresh() if err != nil { //proxy.ClusterGroup.LogPrintf(LvlErr, "Sharding proxy refresh error (%s)", err) diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index 5eab1b550..4346434b4 100644 --- a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -325,8 +325,7 @@ func (proxy *ProxySQLProxy) Refresh() error { } // nothing should be done if no bootstrap - if cluster.Conf.ProxysqlBootstrap { - + if cluster.Conf.ProxysqlBootstrap && cluster.IsDiscovered() { // if ProxySQL and replication-manager states differ, resolve the conflict if bke.PrxStatus == "OFFLINE_HARD" && s.State == stateSlave && !s.IsIgnored() { if cluster.Conf.ProxysqlDebug { @@ -626,3 +625,18 @@ func (proxy *ProxySQLProxy) RotationAdminPasswords(password string) { } } + +func (proxy *ProxySQLProxy) Shutdown() { + cluster := proxy.ClusterGroup + psql, err := proxy.Connect() + if err != nil { + cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + return + } + defer psql.Connection.Close() + + err = psql.Shutdown() + if err != nil { + cluster.LogPrintf(LvlErr, "ProxySQL could not shutdown (%s)", err) + } +} diff --git a/cluster/srv.go b/cluster/srv.go index 0522623ae..3ec9961c4 100644 --- a/cluster/srv.go +++ 
b/cluster/srv.go @@ -414,6 +414,10 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { // From here we have a new connection + if server.State == stateErrorAuth { + server.SetState(stateSuspect) + } + // For orchestrator to trigger a start via tracking state URL if server.PrevState == stateFailed { server.DelWaitStartCookie() diff --git a/cluster/srv_set.go b/cluster/srv_set.go index 11140d68d..55b93a8d8 100644 --- a/cluster/srv_set.go +++ b/cluster/srv_set.go @@ -80,6 +80,11 @@ func (server *ServerMonitor) SetFailed() { func (server *ServerMonitor) SetMaster() { server.SetState(stateMaster) + //server.ClusterGroup.LogPrintf(LvlInfo, "Server %s state transition from %s changed to: %s in SetMaster", server.URL, server.PrevState, stateMaster) + _, file, no, ok := runtime.Caller(1) + if ok { + server.ClusterGroup.LogPrintf(LvlDbg, "SetMaster called from %s#%d\n", file, no) + } for _, s := range server.ClusterGroup.Servers { s.HaveNoMasterOnStart = false } diff --git a/etc/config.toml b/etc/config.toml index 7185924b4..1f0edfafa 100644 --- a/etc/config.toml +++ b/etc/config.toml @@ -1,10 +1,21 @@ [Default] -include = "/etc/replication-manager/cluster.d" +include = "./etc/local/masterslave/proxysql" +prov-orchestrator = "local" -monitoring-save-config = false -monitoring-datadir = "/var/lib/replication-manager" -#monitoring-sharedir = "/usr/share/replication-manager" +#mariadb +prov-db-binary-basedir= "/usr/sbin" +prov-db-client-basedir= "/usr/bin" +backup-mysqlclient-path ="/usr/bin/mysql" +backup-mysqlbinlog-path = "/usr/bin/mysqlbinlog" +backup-mysqldump-options = "--hex-blob --single-transaction --verbose --all-databases --add-drop-database" + +monitoring-save-config = true +monitoring-key-path = ".replication-manager.key" + +monitoring-sharedir="./share" +monitoring-datadir="/home/emma/repdata" +http-root="./dashboard" ## Timeout in seconds between consecutive monitoring @@ -16,22 +27,7 @@ monitoring-ticker = 2 log-file = "/var/log/replication-manager.log" log-heartbeat = false -log-syslog = false - - - -################# -## ARBITRATION ## -################# -arbitration-external = false -arbitration-external-secret = "13787932529099014144" -arbitration-external-hosts = "88.191.151.84:80" -arbitration-peer-hosts ="127.0.0.1:10002" - -## Unique value on each replication-manager - -arbitration-external-unique-id = 0 ########## ## HTTP ## @@ -51,51 +47,5 @@ http-refresh-interval = 4000 api-credentials = "admin:repman" api-port = "10005" -api-https-bind = false - -api-credentials-acl-allow = "admin:cluster proxy db prov,dba:cluster proxy db,foo:" -api-credentials-acl-discard = false -api-credentials-external = "dba:repman,foo:bar" - -############ -## ALERTS ## -############ - -mail-from = "replication-manager@localhost" -mail-smtp-addr = "localhost:25" -mail-to = "replication-manager@signal18.io" -mail-smtp-password="" -mail-smtp-user="" - -alert-slack-channel = "#support" -alert-slack-url = "" -alert-slack-user = "svar" - -########## -# STATS ## -########## - -graphite-metrics = false -graphite-carbon-host = "127.0.0.1" -graphite-carbon-port = 2003 -graphite-embedded = false -graphite-carbon-api-port = 10002 -graphite-carbon-server-port = 10003 -graphite-carbon-link-port = 7002 -graphite-carbon-pickle-port = 2004 -graphite-carbon-pprof-port = 7007 - -backup-mydumper-path = "/usr/local/bin/mydumper" -backup-myloader-path = "/usr/local/bin/myloader" -backup-mysqlbinlog-path = "/usr/local/bin/mysqlbinlog" -backup-mysqldump-path = "/usr/local/bin/mysqldump" - -############## -# BENCHMARK 
## -############## -sysbench-binary-path = "/usr/bin/sysbench" -sysbench-threads = 4 -sysbench-time = 100 -sysbench-v1 = true diff --git a/etc/local/masterslave/proxysql/config.toml b/etc/local/masterslave/proxysql/config.toml index 370cbcf3e..5e11dff12 100644 --- a/etc/local/masterslave/proxysql/config.toml +++ b/etc/local/masterslave/proxysql/config.toml @@ -4,8 +4,8 @@ ## change the service file in /etc/systemd/system/replication-manager.service to looks like : ## replication-manager-osc --config=./etc/config.toml.sample --cluster=Cluster01,Cluster_Test_2_Nodes monitor -[MasterSlaveProxysql] -title = "MasterSlaveProxysql" +[emma] +title = "emma" prov-orchestrator = "local" db-servers-hosts = "127.0.0.1:3313,127.0.0.1:3314" @@ -21,5 +21,6 @@ proxysql-port=3303 proxysql-admin-port = 6032 proxysql-servers="127.0.0.1" proxysql-multiplexing = false -proxysql-password = "admin" proxysql-user = "admin" +proxysql-password = "admin" +proxysql-binary-path = "/usr/bin/proxysql" diff --git a/router/proxysql/proxysql.go b/router/proxysql/proxysql.go index 551f15745..0a3dbe1d9 100644 --- a/router/proxysql/proxysql.go +++ b/router/proxysql/proxysql.go @@ -65,7 +65,6 @@ func (psql *ProxySQL) Connect() error { ReadTimeout: time.Second * 15, AllowNativePasswords: true, } - var err error psql.Connection, err = sqlx.Connect("mysql", ProxysqlConfig.FormatDSN()) if err != nil { @@ -359,3 +358,8 @@ func (psql *ProxySQL) SaveMySQLUsersToDisk() error { _, err := psql.Connection.Exec("SAVE MYSQL USERS TO DISK") return err } + +func (psql *ProxySQL) Shutdown() error { + _, err := psql.Connection.Exec("PROXYSQL KILL") + return err +} diff --git a/server/server.go b/server/server.go index 7715644fa..d931482c2 100644 --- a/server/server.go +++ b/server/server.go @@ -423,6 +423,8 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { var strClusters string strClusters = cfgGroup + //fmt.Printf("%+v\n", fistRead.AllSettings()) + //if cluster name is empty, go discover cluster if strClusters == "" { // Discovering the clusters from all merged conf files build clusterDiscovery map @@ -434,7 +436,7 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //extract the default section of the config files cf1 := fistRead.Sub("Default") //init viper to save the config - vipersave := viper.GetViper() + //vipersave := viper.GetViper() //cf1.Debug() if cf1 == nil { @@ -445,9 +447,10 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { cf1.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_")) cf1.SetEnvPrefix("DEFAULT") - vipersave.MergeConfigMap(cf1.AllSettings()) - //fmt.Printf("%+v\n", vipersave.AllSettings()) - vipersave.Unmarshal(&conf) + //vipersave.MergeConfigMap(cf1.AllSettings()) + //fmt.Printf("%+v\n", cf1.AllSettings()) + //vipersave.Unmarshal(&conf) + cf1.Unmarshal(&conf) // fmt.Printf("%+v\n", conf) //os.Exit(3) //conf.PrintConf() diff --git a/server/server_add.go b/server/server_add.go index a63128d08..f3829eb8b 100644 --- a/server/server_add.go +++ b/server/server_add.go @@ -7,13 +7,10 @@ package server import ( - "fmt" - "github.com/signal18/replication-manager/config" ) func (repman *ReplicationManager) AddCluster(clusterName string, clusterHead string) error { - fmt.Printf("ADD CLUSTER\n") var myconf = make(map[string]config.Config) myconf[clusterName] = repman.Conf @@ -44,8 +41,9 @@ func (repman *ReplicationManager) AddCluster(clusterName string, clusterHead str //confs[clusterName] = repman.GetClusterConfig(fistRead, repman.ImmuableFlagMaps["default"], 
repman.DynamicFlagMaps["default"], clusterName, conf) cluster, _ := repman.StartCluster(clusterName) - fmt.Printf("ADD CLUSTER CONF :\n") - cluster.Conf.PrintConf() + //fmt.Printf("ADD CLUSTER def map :\n") + //fmt.Printf("%s\n", repman.ImmuableFlagMaps) + //cluster.Conf.PrintConf() cluster.SetClusterHead(clusterHead) //cluster.SetClusterHead(clusterName) diff --git a/server/server_monitor.go b/server/server_monitor.go index 92e6a9f13..1fb9e3eae 100644 --- a/server/server_monitor.go +++ b/server/server_monitor.go @@ -407,7 +407,7 @@ func init() { monitorCmd.Flags().StringVar(&conf.ProvIopsLatency, "prov-db-disk-iops-latency", "0.002", "IO latency in s") monitorCmd.Flags().StringVar(&conf.ProvCores, "prov-db-cpu-cores", "1", "Number of cpu cores for the micro service VM") monitorCmd.Flags().BoolVar(&conf.ProvDBApplyDynamicConfig, "prov-db-apply-dynamic-config", false, "Dynamic database config change") - monitorCmd.Flags().StringVar(&conf.ProvTags, "prov-db-tags", "semisync,row,innodb,noquerycache,threadpool,slow,pfs,docker,linux,readonly,diskmonitor,sqlerror,compressbinlog", "playbook configuration tags") + monitorCmd.Flags().StringVar(&conf.ProvTags, "prov-db-tags", "semisync,row,innodb,noquerycache,threadpool,slow,pfs,docker,linux,readonly,diskmonitor,sqlerror,compressbinlog,readonly", "playbook configuration tags") monitorCmd.Flags().StringVar(&conf.ProvDomain, "prov-db-domain", "0", "Config domain id for the cluster") monitorCmd.Flags().StringVar(&conf.ProvMem, "prov-db-memory", "256", "Memory in M for micro service VM") monitorCmd.Flags().StringVar(&conf.ProvMemSharedPct, "prov-db-memory-shared-pct", "threads:16,innodb:60,myisam:10,aria:10,rocksdb:1,tokudb:1,s3:1,archive:1,querycache:0", "% memory shared per buffer") From 6c2261a630f083ba73ad74814a16210304ceebe1 Mon Sep 17 00:00:00 2001 From: emma Date: Thu, 27 Apr 2023 15:58:01 +0200 Subject: [PATCH 18/39] fix opensvc unprovisionning and provision multiple times --- cluster/cluster.go | 3 +-- cluster/cluster_has.go | 2 +- cluster/srv.go | 7 ++++--- etc/config.toml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index f1687df78..8a6dfb894 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -1490,6 +1490,7 @@ func (cluster *Cluster) ReloadCertificates() { } func (cluster *Cluster) ResetStates() { + cluster.SetUnDiscovered() cluster.slaves = nil cluster.master = nil cluster.oldMaster = nil @@ -1516,7 +1517,6 @@ func (cluster *Cluster) ResetStates() { cluster.IsClusterDown = true cluster.IsProvision = false cluster.IsNotMonitoring = true - cluster.Topology = topoUnknown cluster.canFlashBack = true cluster.CanInitNodes = true @@ -1525,7 +1525,6 @@ func (cluster *Cluster) ResetStates() { cluster.testStopCluster = true cluster.testStartCluster = true - cluster.SetUnDiscovered() cluster.newServerList() cluster.newProxyList() cluster.sme.RemoveFailoverState() diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 2a121ee04..06c5015d0 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -150,7 +150,7 @@ func (cluster *Cluster) HasAllDbUp() bool { } for _, s := range cluster.Servers { if s != nil { - if s.State == stateFailed || s.State == stateErrorAuth /*&& misc.Contains(cluster.ignoreList, s.URL) == false*/ { + if s.State == stateFailed /*|| s.State == stateErrorAuth && misc.Contains(cluster.ignoreList, s.URL) == false*/ { return false } if s.State == stateSuspect && cluster.GetTopology() != topoUnknown { diff --git a/cluster/srv.go 
b/cluster/srv.go index 3ec9961c4..4f41095b6 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -355,7 +355,7 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { if server.ClusterGroup.master == nil { server.ClusterGroup.LogPrintf(LvlDbg, "Master not defined") } - if server.ClusterGroup.GetMaster() != nil && server.URL == server.ClusterGroup.GetMaster().URL { + if server.ClusterGroup.GetMaster() != nil && server.URL == server.ClusterGroup.GetMaster().URL && server.GetCluster().GetTopology() != topoUnknown { server.FailSuspectHeartbeat = server.ClusterGroup.sme.GetHeartbeats() if server.ClusterGroup.GetMaster().FailCount <= server.ClusterGroup.Conf.MaxFail { server.ClusterGroup.LogPrintf("INFO", "Master Failure detected! Retry %d/%d", server.ClusterGroup.master.FailCount, server.ClusterGroup.Conf.MaxFail) @@ -374,7 +374,7 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { } else { // not the master or a virtual master server.ClusterGroup.LogPrintf(LvlDbg, "Failure detection of no master FailCount %d MaxFail %d", server.FailCount, server.ClusterGroup.Conf.MaxFail) - if server.FailCount >= server.ClusterGroup.Conf.MaxFail { + if server.FailCount >= server.ClusterGroup.Conf.MaxFail && server.GetCluster().GetTopology() != topoUnknown { if server.FailCount == server.ClusterGroup.Conf.MaxFail { server.ClusterGroup.LogPrintf("INFO", "Declaring replica %s as failed", server.URL) server.SetState(stateFailed) @@ -414,7 +414,8 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { // From here we have a new connection - if server.State == stateErrorAuth { + //Without topology we should never declare a server failed + if (server.State == stateErrorAuth || server.State == stateFailed) && server.GetCluster().GetTopology() == topoUnknown && server.PrevState != stateSuspect { server.SetState(stateSuspect) } diff --git a/etc/config.toml b/etc/config.toml index 1f0edfafa..1a149b950 100644 --- a/etc/config.toml +++ b/etc/config.toml @@ -10,8 +10,8 @@ backup-mysqlclient-path ="/usr/bin/mysql" backup-mysqlbinlog-path = "/usr/bin/mysqlbinlog" backup-mysqldump-options = "--hex-blob --single-transaction --verbose --all-databases --add-drop-database" -monitoring-save-config = true -monitoring-key-path = ".replication-manager.key" +#monitoring-save-config = true +#monitoring-key-path = ".replication-manager.key" monitoring-sharedir="./share" monitoring-datadir="/home/emma/repdata" From 7b9d1e94eca1192703feaf60b6a250fcffb91408 Mon Sep 17 00:00:00 2001 From: emma Date: Fri, 28 Apr 2023 12:46:39 +0200 Subject: [PATCH 19/39] add pull/clone from github during init config --- cluster/cluster.go | 215 ++++++++++++++------- etc/local/masterslave/proxysql/config.toml | 26 --- server/server.go | 18 +- server/server_cmd.go | 16 ++ 4 files changed, 175 insertions(+), 100 deletions(-) delete mode 100644 etc/local/masterslave/proxysql/config.toml diff --git a/cluster/cluster.go b/cluster/cluster.go index 8a6dfb894..384d0b204 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -814,51 +814,19 @@ func (cluster *Cluster) Save() error { if cluster.Conf.ConfRewrite { //clone git repository in case its the first time if cluster.Conf.GitUrl != "" { - if _, err := os.Stat(cluster.Conf.WorkingDir + "/" + cluster.Name + "/.git"); err == nil { - path := cluster.Conf.WorkingDir + "/" + cluster.Name - - // We instantiate a new repository targeting the given path (the .git folder) - r, err := git.PlainOpen(path) - git_ex.CheckIfError(err) - - // Get the working directory for the repository - w, err := r.Worktree() - 
git_ex.CheckIfError(err) - - // Pull the latest changes from the origin remote and merge into the current branch - git_ex.Info("git pull origin") - err = w.Pull(&git.PullOptions{RemoteName: "origin"}) - //git_ex.CheckIfError(err) - } else { - url := cluster.Conf.GitUrl - directory := cluster.Conf.WorkingDir + "/" + cluster.Name - - // Clone the given repository to the given directory - git_ex.Info("git clone %s %s --recursive", url, directory) - - _, err := git.PlainClone(directory, false, &git.CloneOptions{ - URL: url, - RecurseSubmodules: git.DefaultSubmoduleRecursionDepth, - Auth: &git_http.BasicAuth{ - Username: "replication-manager", // yes, this can be anything except an empty string - Password: cluster.Conf.GitAccesToken, - }, - }) - - git_ex.CheckIfError(err) - } - + CloneConfigFromGit(cluster.Conf.GitUrl, cluster.Conf.GitAccesToken, cluster.WorkingDir, cluster.Name) } + //fmt.Printf("SAVE CLUSTER \n") //cluster.Conf.PrintConf() var myconf = make(map[string]config.Config) myconf["saved-"+cluster.Name] = cluster.Conf - file, err := os.OpenFile(cluster.Conf.WorkingDir+"/"+cluster.Name+"/config.toml", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) + file, err := os.OpenFile(cluster.Conf.WorkingDir+"/"+cluster.Name+"/"+cluster.Name+".toml", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) if err != nil { if os.IsPermission(err) { - cluster.LogPrintf(LvlInfo, "File permission denied: %s", cluster.Conf.WorkingDir+"/"+cluster.Name+"/config.toml") + cluster.LogPrintf(LvlInfo, "File permission denied: %s", cluster.Conf.WorkingDir+"/"+cluster.Name+"/"+cluster.Name+".toml") } return err } @@ -924,38 +892,51 @@ func (cluster *Cluster) Save() error { //to load the new generated config file in github if cluster.Conf.GitUrl != "" { - directory := cluster.Conf.WorkingDir + "/" + cluster.Name - r, err := git.PlainOpen(directory) - git_ex.CheckIfError(err) - - w, err := r.Worktree() - git_ex.CheckIfError(err) - - msg := "Update config.toml file" - - // Adds the new file to the staging area. - git_ex.Info("git add" + directory + "/config.toml") - _, err = w.Add("config.toml") - git_ex.CheckIfError(err) - - git_ex.Info("git commit -m \"New config file\"") - _, err = w.Commit(msg, &git.CommitOptions{ - Author: &git_obj.Signature{ - Name: "Replication-manager", - Email: cluster.Conf.MailFrom, - When: time.Now(), - }, - }) - - git_ex.CheckIfError(err) - - git_ex.Info("git push") - // push using default options - err = r.Push(&git.PushOptions{Auth: &git_http.BasicAuth{ - Username: "toto", // yes, this can be anything except an empty string - Password: cluster.Conf.GitAccesToken, - }}) - git_ex.CheckIfError(err) + /* + directory := cluster.Conf.WorkingDir + "/" + cluster.Name + r, err := git.PlainOpen(directory) + if err != nil { + log.Errorf("Git error : cannot PlainOpen : %s", err) + } + + w, err := r.Worktree() + if err != nil { + log.Errorf("Git error : cannot Worktree : %s", err) + } + + msg := "Update " + cluster.Name + ".toml file" + + // Adds the new file to the staging area. 
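The git synchronisation this patch factors out of Save() boils down to two helpers: a clone-or-pull step (CloneConfigFromGit) and a stage-commit-push step (PushConfigToGit). As a reference point, the clone-or-pull half can be sketched with go-git v5 as follows; the helper name, parameters, URL and paths below are illustrative only and are not part of the patch:

package main

import (
    "errors"
    "log"

    git "github.com/go-git/go-git/v5"
    githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

// syncConfigRepo is a hypothetical helper: clone the configuration repository
// on first use, pull it on every later call.
func syncConfigRepo(url, token, dir string) error {
    repo, err := git.PlainOpen(dir)
    if err == git.ErrRepositoryNotExists {
        // First run: no .git directory yet, clone into the working directory.
        _, err = git.PlainClone(dir, false, &git.CloneOptions{
            URL:  url,
            Auth: &githttp.BasicAuth{Username: "replication-manager", Password: token},
        })
        return err
    }
    if err != nil {
        return err
    }
    // Later runs: pull the latest configuration from origin.
    wt, err := repo.Worktree()
    if err != nil {
        return err
    }
    err = wt.Pull(&git.PullOptions{RemoteName: "origin"})
    if errors.Is(err, git.NoErrAlreadyUpToDate) {
        return nil
    }
    return err
}

func main() {
    // Hypothetical values, for illustration only.
    if err := syncConfigRepo("https://example.com/conf.git", "token", "/var/lib/replication-manager"); err != nil {
        log.Println("config repo sync failed:", err)
    }
}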
+ git_ex.Info("git add " + cluster.Name + ".toml") + _, err = w.Add(cluster.Name + ".toml") + if err != nil { + log.Errorf("Git error : cannot Add %s : %s", cluster.Name+".toml", err) + } + + git_ex.Info("git commit -a -m \"New config file\"") + _, err = w.Commit(msg, &git.CommitOptions{ + Author: &git_obj.Signature{ + Name: "Replication-manager", + Email: cluster.Conf.MailFrom, + When: time.Now(), + }, + }) + + if err != nil { + log.Errorf("Git error : cannot Commit : %s", err) + } + + git_ex.Info("git push") + // push using default options + err = r.Push(&git.PushOptions{Auth: &git_http.BasicAuth{ + Username: "toto", // yes, this can be anything except an empty string + Password: cluster.Conf.GitAccesToken, + }}) + if err != nil { + log.Errorf("Git error : cannot Push : %s", err) + + }*/ + PushConfigToGit(cluster.Conf.GitAccesToken, cluster.WorkingDir, cluster.Name) } @@ -968,6 +949,104 @@ func (cluster *Cluster) Save() error { return nil } +func PushConfigToGit(tok string, dir string, name string) { + path := dir + if !strings.Contains(dir, name) { + path += "/" + name + } + //directory := cluster.Conf.WorkingDir + "/" + cluster.Name + r, err := git.PlainOpen(path) + if err != nil { + log.Errorf("Git error : cannot PlainOpen : %s", err) + } + + w, err := r.Worktree() + if err != nil { + log.Errorf("Git error : cannot Worktree : %s", err) + } + + msg := "Update " + name + ".toml file" + + // Adds the new file to the staging area. + git_ex.Info("git add " + name + ".toml") + _, err = w.Add(name + ".toml") + if err != nil { + log.Errorf("Git error : cannot Add %s : %s", name+".toml", err) + } + + git_ex.Info("git commit -a -m \"New config file\"") + _, err = w.Commit(msg, &git.CommitOptions{ + Author: &git_obj.Signature{ + Name: "Replication-manager", + When: time.Now(), + }, + }) + + if err != nil { + log.Errorf("Git error : cannot Commit : %s", err) + } + + git_ex.Info("git push") + // push using default options + err = r.Push(&git.PushOptions{Auth: &git_http.BasicAuth{ + Username: "toto", // yes, this can be anything except an empty string + Password: tok, + }}) + if err != nil { + log.Errorf("Git error : cannot Push : %s", err) + + } +} + +func CloneConfigFromGit(url string, tok string, dir string, name string) { + //fmt.Printf("Clone from git : url %s, tok %s, dir %s, name %s\n", url, tok, dir, name) + path := dir + if !strings.Contains(dir, name) { + path += "/" + name + } + if _, err := os.Stat(path + "/.git"); err == nil { + + // We instantiate a new repository targeting the given path (the .git folder) + r, err := git.PlainOpen(path) + if err != nil { + log.Errorf("Git error : cannot PlainOpen : %s", err) + return + } + + // Get the working directory for the repository + w, err := r.Worktree() + if err != nil { + log.Errorf("Git error : cannot Worktree : %s", err) + return + } + + // Pull the latest changes from the origin remote and merge into the current branch + git_ex.Info("git pull origin") + err = w.Pull(&git.PullOptions{RemoteName: "origin"}) + + if err != nil && fmt.Sprintf("%v", err) != "already up-to-date" { + log.Errorf("Git error : cannot Pull : %s", err) + } + + } else { + // Clone the given repository to the given directory + git_ex.Info("git clone %s %s --recursive", url, path) + + _, err := git.PlainClone(path, false, &git.CloneOptions{ + URL: url, + RecurseSubmodules: git.DefaultSubmoduleRecursionDepth, + Auth: &git_http.BasicAuth{ + Username: "replication-manager", // yes, this can be anything except an empty string + Password: tok, + }, + }) + + if err != nil { 
+ log.Errorf("Git error : cannot Clone %s repository : %s", url, err) + } + } +} + func (cluster *Cluster) SaveClusterFromScratch() error { var myconf = make(map[string]config.Config) diff --git a/etc/local/masterslave/proxysql/config.toml b/etc/local/masterslave/proxysql/config.toml deleted file mode 100644 index 5e11dff12..000000000 --- a/etc/local/masterslave/proxysql/config.toml +++ /dev/null @@ -1,26 +0,0 @@ -## config.toml -## Example replication-manager configuration file - -## change the service file in /etc/systemd/system/replication-manager.service to looks like : -## replication-manager-osc --config=./etc/config.toml.sample --cluster=Cluster01,Cluster_Test_2_Nodes monitor - -[emma] -title = "emma" - -prov-orchestrator = "local" -db-servers-hosts = "127.0.0.1:3313,127.0.0.1:3314" -db-servers-prefered-master = "127.0.0.1:3313" -db-servers-credential = "root:mariadb" -db-servers-connect-timeout = 1 -replication-credential = "root:mariadb" - -proxysql = true -proxysql-bootstrap = true -proxysql-bootstrap-hostgroups = true -proxysql-port=3303 -proxysql-admin-port = 6032 -proxysql-servers="127.0.0.1" -proxysql-multiplexing = false -proxysql-user = "admin" -proxysql-password = "admin" -proxysql-binary-path = "/usr/bin/proxysql" diff --git a/server/server.go b/server/server.go index d931482c2..4cfb228c6 100644 --- a/server/server.go +++ b/server/server.go @@ -396,18 +396,24 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { //read and set config from all files in the working dir for _, f := range files { if f.IsDir() && f.Name() != "graphite" { + //load config file from git hub + if fistRead.GetString("default.git-url") != "" && fistRead.GetString("default.git-acces-token") != "" { + cluster.CloneConfigFromGit(fistRead.GetString("default.git-url"), fistRead.GetString("default.git-acces-token"), conf.WorkingDir, f.Name()) + } + //fistRead.SetConfigName(f.Name()) dynRead.SetConfigName(f.Name()) - if _, err := os.Stat(conf.WorkingDir + "/" + f.Name() + "/config.toml"); os.IsNotExist(err) { - log.Warning("No monitoring saved config found " + conf.WorkingDir + "/" + f.Name() + "/config.toml") + if _, err := os.Stat(conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml"); os.IsNotExist(err) { + log.Warning("No monitoring saved config found " + conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml") } else { - log.Infof("Parsing saved config from working directory %s ", conf.WorkingDir+"/"+f.Name()+"/config.toml") - fistRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/config.toml") - dynRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/config.toml") + + log.Infof("Parsing saved config from working directory %s ", conf.WorkingDir+"/"+f.Name()+"/"+f.Name()+".toml") + fistRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml") + dynRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml") err := fistRead.MergeInConfig() err = dynRead.MergeInConfig() if err != nil { - log.Fatal("Config error in " + conf.WorkingDir + "/" + f.Name() + "/config.toml" + ":" + err.Error()) + log.Fatal("Config error in " + conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml" + ":" + err.Error()) } } } diff --git a/server/server_cmd.go b/server/server_cmd.go index d840ccdb1..9ddc5bfa1 100644 --- a/server/server_cmd.go +++ b/server/server_cmd.go @@ -73,6 +73,18 @@ var versionCmd = &cobra.Command{ }, } +var configMergeCmd = &cobra.Command{ + Use: "config-merge", + Short: "Merges the initial configuration file with the dynamic 
one", + Long: `Merges all parameters modified in dynamic mode with the original parameters (including immutable parameters) by merging the config files generated by the dynamic mode. Be careful, this command overwrites the original config file.`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Start config merge command !!\n") + fmt.Println("Cluster: ", cfgGroup) + fmt.Println("Config : ", conf.ConfigFile) + + }, +} + func init() { conf.GoArch = GoArch @@ -87,7 +99,11 @@ func init() { rootCmd.PersistentFlags().BoolVar(&conf.Verbose, "verbose", false, "Print detailed execution info") rootCmd.PersistentFlags().StringVar(&memprofile, "memprofile", "", "Write a memory profile to this file readable by pprof") + configMergeCmd.PersistentFlags().StringVar(&cfgGroup, "cluster", "", "Configuration group (default is none)") + configMergeCmd.PersistentFlags().StringVar(&conf.ConfigFile, "config", "", "Configuration file (default is config.toml)") + rootCmd.AddCommand(versionCmd) + rootCmd.AddCommand(configMergeCmd) } From f023b16e7d70a9a3269641b56e3c4238b77bb34d Mon Sep 17 00:00:00 2001 From: emma Date: Tue, 2 May 2023 12:38:17 +0200 Subject: [PATCH 20/39] add git clone directory when initConfig, change sme by StateMachine, add debug StateMachine --- cluster/cluster.go | 166 ++++++++++++++----------------- cluster/cluster_add.go | 8 +- cluster/cluster_bck.go | 6 +- cluster/cluster_chk.go | 34 ++++--- cluster/cluster_fail.go | 76 +++++++------- cluster/cluster_get.go | 8 +- cluster/cluster_has.go | 8 +- cluster/cluster_set.go | 18 ++-- cluster/cluster_topo.go | 4 +- cluster/cluster_wait.go | 6 +- cluster/prov.go | 36 +++---- cluster/prov_opensvc.go | 4 +- cluster/prx.go | 4 +- cluster/prx_haproxy.go | 10 +- cluster/prx_mariadbshardproxy.go | 4 +- cluster/prx_maxscale.go | 12 +-- cluster/prx_proxysql.go | 42 ++++---- cluster/prx_sphinx.go | 8 +- cluster/srv.go | 24 ++--- cluster/srv_chk.go | 50 +++++----- cluster/srv_job.go | 28 +++--- cluster/srv_rejoin.go | 8 +- config/config.go | 71 +++++++++++++ server/server.go | 38 +++++-- server/server_cmd.go | 19 +--- server/server_monitor.go | 37 ++++--- utils/state/state.go | 42 ++++---- 27 files changed, 423 insertions(+), 348 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 384d0b204..d9e54c64e 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -21,7 +21,6 @@ import ( "github.com/bluele/logrus_slack" "github.com/go-git/go-git/v5" - git_ex "github.com/go-git/go-git/v5/_examples" git_obj "github.com/go-git/go-git/v5/plumbing/object" git_http "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/pelletier/go-toml" @@ -138,7 +137,7 @@ type Cluster struct { rplPass string `json:"-"` proxysqlUser string `json:"-"` proxysqlPass string `json:"-"` - sme *state.StateMachine `json:"-"` + StateMachine *state.StateMachine `json:"stateMachine"` runOnceAfterTopology bool `json:"-"` logPtr *os.File `json:"-"` termlength int `json:"-"` @@ -354,8 +353,8 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, imm map[string]interface cluster.Schedule = make(map[string]cron.Entry) cluster.JobResults = make(map[string]*JobResult) // Initialize the state machine at this stage where everything is fine. 
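One side effect of the sme to StateMachine rename shown in the struct hunk above is worth spelling out: the old field was unexported, so encoding/json never serialised it regardless of its tag, while the new exported field tagged json:"stateMachine" now appears in the cluster's JSON payloads. A minimal illustration with stand-in types (these are not the real Cluster or state.StateMachine definitions):

package main

import (
    "encoding/json"
    "fmt"
)

// StateMachine is a stand-in for state.StateMachine.
type StateMachine struct{ Heartbeats int64 }

// before mirrors the old layout: unexported field, ignored by encoding/json.
type before struct {
    sme *StateMachine `json:"-"`
}

// after mirrors the new layout: exported field, marshalled under "stateMachine".
type after struct {
    StateMachine *StateMachine `json:"stateMachine"`
}

func main() {
    sm := &StateMachine{Heartbeats: 42}
    b, _ := json.Marshal(before{sme: sm})
    a, _ := json.Marshal(after{StateMachine: sm})
    fmt.Println(string(b)) // {}
    fmt.Println(string(a)) // {"stateMachine":{"Heartbeats":42}}
}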
- cluster.sme = new(state.StateMachine) - cluster.sme.Init() + cluster.StateMachine = new(state.StateMachine) + cluster.StateMachine.Init() cluster.Conf = conf if cluster.Conf.Interactive { cluster.LogPrintf(LvlInfo, "Failover in interactive mode") @@ -575,13 +574,13 @@ func (cluster *Cluster) Run() { wg.Add(1) go cluster.refreshProxies(wg) } - if cluster.sme.SchemaMonitorEndTime+60 < time.Now().Unix() && !cluster.sme.IsInSchemaMonitor() { + if cluster.StateMachine.SchemaMonitorEndTime+60 < time.Now().Unix() && !cluster.StateMachine.IsInSchemaMonitor() { go cluster.MonitorSchema() } if cluster.Conf.TestInjectTraffic || cluster.Conf.AutorejoinSlavePositionalHeartbeat || cluster.Conf.MonitorWriteHeartbeat { cluster.InjectProxiesTraffic() } - if cluster.sme.GetHeartbeats()%30 == 0 { + if cluster.StateMachine.GetHeartbeats()%30 == 0 { go cluster.initOrchetratorNodes() cluster.MonitorQueryRules() cluster.MonitorVariablesDiff() @@ -591,11 +590,11 @@ func (cluster *Cluster) Run() { cluster.CheckCanSaveDynamicConfig() } else { - cluster.sme.PreserveState("WARN0093") - cluster.sme.PreserveState("WARN0084") - cluster.sme.PreserveState("WARN0095") - cluster.sme.PreserveState("WARN0101") - cluster.sme.PreserveState("ERR00090") + cluster.StateMachine.PreserveState("WARN0093") + cluster.StateMachine.PreserveState("WARN0084") + cluster.StateMachine.PreserveState("WARN0095") + cluster.StateMachine.PreserveState("WARN0101") + cluster.StateMachine.PreserveState("ERR00090") } if !cluster.CanInitNodes { cluster.SetState("ERR00082", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00082"], cluster.errorInitNodes), ErrFrom: "OPENSVC"}) @@ -604,10 +603,10 @@ func (cluster *Cluster) Run() { cluster.SetState("ERR00089", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00089"], cluster.errorConnectVault), ErrFrom: "OPENSVC"}) } - if cluster.sme.GetHeartbeats()%36000 == 0 { + if cluster.StateMachine.GetHeartbeats()%36000 == 0 { cluster.ResticPurgeRepo() } else { - cluster.sme.PreserveState("WARN0094") + cluster.StateMachine.PreserveState("WARN0094") } } wg.Wait() @@ -634,9 +633,9 @@ func (cluster *Cluster) Run() { } func (cluster *Cluster) StateProcessing() { - if !cluster.sme.IsInFailover() { + if !cluster.StateMachine.IsInFailover() { // trigger action on resolving states - cstates := cluster.sme.GetResolvedStates() + cstates := cluster.StateMachine.GetResolvedStates() mybcksrv := cluster.GetBackupServer() master := cluster.GetMaster() for _, s := range cstates { @@ -696,28 +695,28 @@ func (cluster *Cluster) StateProcessing() { } var states []string if cluster.runOnceAfterTopology { - states = cluster.sme.GetFirstStates() + states = cluster.StateMachine.GetFirstStates() } else { - states = cluster.sme.GetStates() + states = cluster.StateMachine.GetStates() } for i := range states { cluster.LogPrintf("STATE", states[i]) } // trigger action on resolving states - ostates := cluster.sme.GetOpenStates() + ostates := cluster.StateMachine.GetOpenStates() for _, s := range ostates { cluster.CheckCapture(s) } - for _, s := range cluster.sme.GetLastOpenedStates() { + for _, s := range cluster.StateMachine.GetLastOpenedStates() { cluster.CheckAlert(s) } - cluster.sme.ClearState() - if cluster.sme.GetHeartbeats()%60 == 0 { + cluster.StateMachine.ClearState() + if cluster.StateMachine.GetHeartbeats()%60 == 0 { cluster.Save() } @@ -745,7 +744,7 @@ func (cluster *Cluster) Save() error { var clsave Save clsave.Crashes = cluster.Crashes clsave.Servers = cluster.Conf.Hosts - clsave.SLA = 
cluster.sme.GetSla() + clsave.SLA = cluster.StateMachine.GetSla() clsave.IsAllDbUp = cluster.IsAllDbUp clsave.SLAHistory = cluster.SLAHistory @@ -795,7 +794,7 @@ func (cluster *Cluster) Save() error { var clsave Save clsave.Crashes = cluster.Crashes clsave.Servers = cluster.Conf.Hosts - clsave.SLA = cluster.sme.GetSla() + clsave.SLA = cluster.StateMachine.GetSla() clsave.IsAllDbUp = cluster.IsAllDbUp clsave.SLAHistory = cluster.SLAHistory @@ -814,7 +813,8 @@ func (cluster *Cluster) Save() error { if cluster.Conf.ConfRewrite { //clone git repository in case its the first time if cluster.Conf.GitUrl != "" { - CloneConfigFromGit(cluster.Conf.GitUrl, cluster.Conf.GitAccesToken, cluster.WorkingDir, cluster.Name) + CloneConfigFromGit(cluster.Conf.GitUrl, cluster.Conf.GitAccesToken, cluster.GetConf().WorkingDir) + } //fmt.Printf("SAVE CLUSTER \n") @@ -892,52 +892,7 @@ func (cluster *Cluster) Save() error { //to load the new generated config file in github if cluster.Conf.GitUrl != "" { - /* - directory := cluster.Conf.WorkingDir + "/" + cluster.Name - r, err := git.PlainOpen(directory) - if err != nil { - log.Errorf("Git error : cannot PlainOpen : %s", err) - } - - w, err := r.Worktree() - if err != nil { - log.Errorf("Git error : cannot Worktree : %s", err) - } - - msg := "Update " + cluster.Name + ".toml file" - - // Adds the new file to the staging area. - git_ex.Info("git add " + cluster.Name + ".toml") - _, err = w.Add(cluster.Name + ".toml") - if err != nil { - log.Errorf("Git error : cannot Add %s : %s", cluster.Name+".toml", err) - } - - git_ex.Info("git commit -a -m \"New config file\"") - _, err = w.Commit(msg, &git.CommitOptions{ - Author: &git_obj.Signature{ - Name: "Replication-manager", - Email: cluster.Conf.MailFrom, - When: time.Now(), - }, - }) - - if err != nil { - log.Errorf("Git error : cannot Commit : %s", err) - } - - git_ex.Info("git push") - // push using default options - err = r.Push(&git.PushOptions{Auth: &git_http.BasicAuth{ - Username: "toto", // yes, this can be anything except an empty string - Password: cluster.Conf.GitAccesToken, - }}) - if err != nil { - log.Errorf("Git error : cannot Push : %s", err) - - }*/ - PushConfigToGit(cluster.Conf.GitAccesToken, cluster.WorkingDir, cluster.Name) - + PushConfigToGit(cluster.Conf.GitAccesToken, cluster.GetConf().WorkingDir, cluster.Name) } err = cluster.Overwrite() @@ -950,10 +905,12 @@ func (cluster *Cluster) Save() error { } func PushConfigToGit(tok string, dir string, name string) { + //fmt.Printf("Push from git : tok %s, dir %s, name %s\n", tok, dir, name) path := dir - if !strings.Contains(dir, name) { - path += "/" + name - } + /* + if !strings.Contains(dir, name) { + path += "/" + name + }*/ //directory := cluster.Conf.WorkingDir + "/" + cluster.Name r, err := git.PlainOpen(path) if err != nil { @@ -968,13 +925,18 @@ func PushConfigToGit(tok string, dir string, name string) { msg := "Update " + name + ".toml file" // Adds the new file to the staging area. 
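For the push half, the sequence PushConfigToGit performs after this change — open the repository at the working directory, stage the cluster directory and its <cluster>.toml, commit, then push with the access token — can be sketched as follows; the helper name, author identity and values are illustrative, and error handling is reduced to log-and-continue as in the original:

package main

import (
    "log"
    "time"

    git "github.com/go-git/go-git/v5"
    gitobj "github.com/go-git/go-git/v5/plumbing/object"
    githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

// pushClusterConfig is a hypothetical reduction of PushConfigToGit.
func pushClusterConfig(workingDir, clusterName, token string) {
    repo, err := git.PlainOpen(workingDir)
    if err != nil {
        log.Printf("cannot open config repo: %s", err)
        return
    }
    wt, err := repo.Worktree()
    if err != nil {
        log.Printf("cannot get worktree: %s", err)
        return
    }
    // Stage the cluster directory and the generated <cluster>.toml inside it.
    if _, err := wt.Add(clusterName); err != nil {
        log.Printf("cannot add %s: %s", clusterName, err)
    }
    if _, err := wt.Add(clusterName + "/" + clusterName + ".toml"); err != nil {
        log.Printf("cannot add %s.toml: %s", clusterName, err)
    }
    _, err = wt.Commit("Update "+clusterName+".toml file", &git.CommitOptions{
        Author: &gitobj.Signature{Name: "Replication-manager", When: time.Now()},
    })
    if err != nil {
        log.Printf("cannot commit: %s", err)
    }
    // Push with HTTP basic auth; only the token matters, the username can be
    // anything except an empty string.
    err = repo.Push(&git.PushOptions{
        Auth: &githttp.BasicAuth{Username: "replication-manager", Password: token},
    })
    if err != nil && err != git.NoErrAlreadyUpToDate {
        log.Printf("cannot push: %s", err)
    }
}

func main() {
    // Hypothetical invocation.
    pushClusterConfig("/var/lib/replication-manager", "cluster1", "token")
}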
- git_ex.Info("git add " + name + ".toml") - _, err = w.Add(name + ".toml") + //git_ex.Info("git add " + name + ".toml") + //_, err = w.Add(name + "/" + name + ".toml") + _, err = w.Add(name) if err != nil { log.Errorf("Git error : cannot Add %s : %s", name+".toml", err) } + _, err = w.Add(name + "/" + name + ".toml") + if err != nil { + log.Errorf("Git error : cannot Add %s : %s", name+"/"+name+".toml", err) + } - git_ex.Info("git commit -a -m \"New config file\"") + //git_ex.Info("git commit -a -m \"New config file\"") _, err = w.Commit(msg, &git.CommitOptions{ Author: &git_obj.Signature{ Name: "Replication-manager", @@ -986,7 +948,7 @@ func PushConfigToGit(tok string, dir string, name string) { log.Errorf("Git error : cannot Commit : %s", err) } - git_ex.Info("git push") + //git_ex.Info("git push") // push using default options err = r.Push(&git.PushOptions{Auth: &git_http.BasicAuth{ Username: "toto", // yes, this can be anything except an empty string @@ -998,12 +960,26 @@ func PushConfigToGit(tok string, dir string, name string) { } } -func CloneConfigFromGit(url string, tok string, dir string, name string) { - //fmt.Printf("Clone from git : url %s, tok %s, dir %s, name %s\n", url, tok, dir, name) - path := dir - if !strings.Contains(dir, name) { - path += "/" + name +func CloneConfigFromGit(url string, tok string, dir string) { + + //fmt.Printf("Clone from git : url %s, tok %s, dir %s\n", url, tok, dir) + if _, err := os.Stat(dir + "/.gitignore"); os.IsNotExist(err) { + file, err := os.Create(dir + "/.gitignore") + if err != nil { + if os.IsPermission(err) { + log.Errorf(LvlInfo, "File permission denied: %s, %s", dir+".gitignore", err) + } + } + defer file.Close() + file.WriteString("/*\n!/*/*.toml\n") + file.Sync() } + + path := dir + /* + if !strings.Contains(dir, name) { + path += "/" + name + }*/ if _, err := os.Stat(path + "/.git"); err == nil { // We instantiate a new repository targeting the given path (the .git folder) @@ -1021,7 +997,7 @@ func CloneConfigFromGit(url string, tok string, dir string, name string) { } // Pull the latest changes from the origin remote and merge into the current branch - git_ex.Info("git pull origin") + //git_ex.Info("git pull origin") err = w.Pull(&git.PullOptions{RemoteName: "origin"}) if err != nil && fmt.Sprintf("%v", err) != "already up-to-date" { @@ -1030,7 +1006,7 @@ func CloneConfigFromGit(url string, tok string, dir string, name string) { } else { // Clone the given repository to the given directory - git_ex.Info("git clone %s %s --recursive", url, path) + //git_ex.Info("git clone %s %s --recursive", url, path) _, err := git.PlainClone(path, false, &git.CloneOptions{ URL: url, @@ -1113,6 +1089,12 @@ func (cluster *Cluster) SaveClusterFromScratch() error { } file.WriteString("[" + cluster.Name + "]\n title = \"" + cluster.Name + " \" \n") s.WriteTo(file) + + err = cluster.Overwrite() + if err != nil { + cluster.LogPrintf(LvlInfo, "Error during Overwriting: %s", err) + } + return nil } @@ -1173,7 +1155,7 @@ func (cluster *Cluster) InitAgent(conf config.Config) { /*func (cluster *Cluster) ReloadConfig(conf config.Config) { cluster.Conf = conf cluster.Configurator.SetConfig(conf) - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() cluster.runOnceAfterTopology = true cluster.SetUnDiscovered() @@ -1183,14 +1165,14 @@ func (cluster *Cluster) InitAgent(conf config.Config) { go cluster.TopologyDiscover(wg) wg.Wait() cluster.newProxyList() - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() 
cluster.initProxies() }*/ func (cluster *Cluster) ReloadConfig(conf config.Config) { cluster.Conf = conf cluster.Configurator.SetConfig(conf) - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() cluster.ResetStates() wg := new(sync.WaitGroup) @@ -1224,11 +1206,11 @@ func (cluster *Cluster) FailoverForce() error { wg.Wait() if err != nil { - for _, s := range cluster.sme.GetStates() { + for _, s := range cluster.StateMachine.GetStates() { cluster.LogPrint(s) } // Test for ERR00012 - No master detected - if cluster.sme.CurState.Search("ERR00012") { + if cluster.StateMachine.CurState.Search("ERR00012") { for _, s := range cluster.Servers { if s.State == "" { s.SetState(stateFailed) @@ -1398,7 +1380,7 @@ func (cluster *Cluster) MonitorSchema() { if cluster.GetMaster().Conn == nil { return } - cluster.sme.SetMonitorSchemaState() + cluster.StateMachine.SetMonitorSchemaState() cluster.GetMaster().Conn.SetConnMaxLifetime(3595 * time.Second) tables, tablelist, logs, err := dbhelper.GetTables(cluster.GetMaster().Conn, cluster.GetMaster().DBVersion) @@ -1465,7 +1447,7 @@ func (cluster *Cluster) MonitorSchema() { cluster.DBIndexSize = totindexsize cluster.DBTableSize = tottablesize cluster.GetMaster().DictTables = tables - cluster.sme.RemoveMonitorSchemaState() + cluster.StateMachine.RemoveMonitorSchemaState() } func (cluster *Cluster) MonitorQueryRules() { @@ -1606,6 +1588,6 @@ func (cluster *Cluster) ResetStates() { cluster.newServerList() cluster.newProxyList() - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() cluster.initProxies() } diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index bab97852a..897e8802c 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -23,13 +23,13 @@ func (cluster *Cluster) AddSeededServer(srv string) error { } else { cluster.Conf.Hosts = srv } - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() cluster.newServerList() wg := new(sync.WaitGroup) wg.Add(1) go cluster.TopologyDiscover(wg) wg.Wait() - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return nil } @@ -118,11 +118,11 @@ func (cluster *Cluster) AddSeededProxy(prx string, srv string, port string, user cluster.Conf.MdbsProxyHosts = srv + ":" + port } } - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() cluster.Lock() cluster.newProxyList() cluster.Unlock() - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return nil } diff --git a/cluster/cluster_bck.go b/cluster/cluster_bck.go index 93d8622f2..270760070 100644 --- a/cluster/cluster_bck.go +++ b/cluster/cluster_bck.go @@ -64,7 +64,7 @@ func (cluster *Cluster) ResticPurgeRepo() error { err := resticcmd.Wait() if err != nil { - cluster.sme.AddState("WARN0094", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0094"], err, string(stdoutBuf.Bytes()), string(stderrBuf.Bytes())), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("WARN0094", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0094"], err, string(stdoutBuf.Bytes()), string(stderrBuf.Bytes())), ErrFrom: "CHECK"}) return err } if errStdout != nil || errStderr != nil { @@ -122,7 +122,7 @@ func (cluster *Cluster) ResticInitRepo() error { err := resticcmd.Wait() if err != nil { - cluster.sme.AddState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err, string(stdoutBuf.Bytes()), string(stderrBuf.Bytes())), ErrFrom: "CHECK"}) + 
cluster.StateMachine.AddState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err, string(stdoutBuf.Bytes()), string(stderrBuf.Bytes())), ErrFrom: "CHECK"}) } if errStdout != nil || errStderr != nil { return errors.New("failed to capture stdout or stderr\n") @@ -159,7 +159,7 @@ func (cluster *Cluster) ResticFetchRepo() error { err := resticcmd.Wait() if err != nil { - cluster.sme.AddState("WARN0093", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0093"], err, string(stdoutBuf.Bytes()), string(stderrBuf.Bytes())), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("WARN0093", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0093"], err, string(stdoutBuf.Bytes()), string(stderrBuf.Bytes())), ErrFrom: "CHECK"}) cluster.ResticInitRepo() return err } diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index 8727cb829..1ea589239 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -26,8 +26,8 @@ import ( func (cluster *Cluster) CheckFailed() { // Don't trigger a failover if a switchover is happening - if cluster.sme.IsInFailover() { - cluster.sme.AddState("ERR00001", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00001"]), ErrFrom: "CHECK"}) + if cluster.StateMachine.IsInFailover() { + cluster.StateMachine.AddState("ERR00001", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00001"]), ErrFrom: "CHECK"}) return } if cluster.master == nil { @@ -115,11 +115,13 @@ func (cluster *Cluster) isAutomaticFailover() bool { if cluster.Conf.Interactive == false { return true } - cluster.sme.AddState("ERR00002", state.State{ErrType: "ERR00002", ErrDesc: fmt.Sprintf(clusterError["ERR00002"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00002", state.State{ErrType: "ERR00002", ErrDesc: fmt.Sprintf(clusterError["ERR00002"]), ErrFrom: "CHECK"}) return false } func (cluster *Cluster) isMasterFailed() bool { + //if master not discover, we can considered it not failed + //can cause infinity loops if set to true if cluster.master == nil { return false } @@ -134,10 +136,10 @@ func (cluster *Cluster) isMaxMasterFailedCountReached() bool { // no illimited failed count if cluster.GetMaster().FailCount >= cluster.Conf.MaxFail { - cluster.sme.AddState("WARN0023", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0023"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("WARN0023", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0023"]), ErrFrom: "CHECK"}) return true } else { - // cluster.sme.AddState("ERR00023", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf("Constraint is blocking state %s, interactive:%t, maxfail reached:%d", cluster.master.State, cluster.Conf.Interactive, cluster.Conf.MaxFail), ErrFrom: "CONF"}) + // cluster.StateMachine.AddState("ERR00023", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf("Constraint is blocking state %s, interactive:%t, maxfail reached:%d", cluster.master.State, cluster.Conf.Interactive, cluster.Conf.MaxFail), ErrFrom: "CONF"}) } return false } @@ -149,7 +151,7 @@ func (cluster *Cluster) isMaxClusterFailoverCountNotReached() bool { return true } if cluster.FailoverCtr == cluster.Conf.FailLimit { - cluster.sme.AddState("ERR00027", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00027"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00027", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00027"]), ErrFrom: "CHECK"}) return 
false } return true @@ -163,7 +165,7 @@ func (cluster *Cluster) isBetweenFailoverTimeValid() bool { } // cluster.LogPrintf("CHECK: Failover Time to short with previous failover") if rem > 0 { - cluster.sme.AddState("ERR00029", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00029"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00029", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00029"]), ErrFrom: "CHECK"}) return false } return true @@ -192,7 +194,7 @@ func (cluster *Cluster) isOneSlaveHeartbeatIncreasing() bool { cluster.LogPrintf(LvlDbg, "SLAVE_RECEIVED_HEARTBEATS %d", status2["SLAVE_RECEIVED_HEARTBEATS"]) } if status2["SLAVE_RECEIVED_HEARTBEATS"] > saveheartbeats { - cluster.sme.AddState("ERR00028", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00028"], s.URL), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00028", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00028"], s.URL), ErrFrom: "CHECK"}) return true } } @@ -252,7 +254,7 @@ func (cluster *Cluster) isMaxscaleSupectRunning() bool { time.Sleep(time.Duration(cluster.Conf.CheckFalsePositiveMaxscaleTimeout) * time.Second) if strings.Contains(cluster.master.MxsServerStatus, "Running") { - cluster.sme.AddState("ERR00030", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00030"], cluster.master.MxsServerStatus), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00030", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00030"], cluster.master.MxsServerStatus), ErrFrom: "CHECK"}) return true } return false @@ -270,7 +272,7 @@ func (cluster *Cluster) isFoundCandidateMaster() bool { } if key == -1 { // No candidates found in slaves list - cluster.sme.AddState("ERR00032", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00032"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00032", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00032"]), ErrFrom: "CHECK"}) return false } return true @@ -297,7 +299,7 @@ func (cluster *Cluster) isActiveArbitration() bool { resp, err := client.Do(req) if err != nil { cluster.LogPrintf(LvlErr, "%s", err.Error()) - cluster.sme.AddState("ERR00022", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00022"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00022", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00022"]), ErrFrom: "CHECK"}) return false } defer resp.Body.Close() @@ -311,14 +313,14 @@ func (cluster *Cluster) isActiveArbitration() bool { err = json.Unmarshal(body, &r) if err != nil { cluster.LogPrintf(LvlErr, "Arbitrator sent invalid JSON") - cluster.sme.AddState("ERR00022", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00022"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00022", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00022"]), ErrFrom: "CHECK"}) return false } if r.Arbitration == "winner" { cluster.LogPrintf(LvlInfo, "Arbitrator says: winner") return true } - cluster.sme.AddState("ERR00022", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00022"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00022", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00022"]), ErrFrom: "CHECK"}) return false } @@ -336,7 +338,7 @@ func (cluster *Cluster) isExternalOk() bool { return false } if req.StatusCode == 200 { - cluster.sme.AddState("ERR00031", state.State{ErrType: LvlErr, 
ErrDesc: fmt.Sprintf(clusterError["ERR00031"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00031", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00031"]), ErrFrom: "CHECK"}) return true } return false @@ -347,7 +349,7 @@ func (cluster *Cluster) isArbitratorAlive() bool { return true } if cluster.IsFailedArbitrator { - cluster.sme.AddState("ERR00055", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00055"], cluster.Conf.ArbitrationSasHosts), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00055", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00055"], cluster.Conf.ArbitrationSasHosts), ErrFrom: "CHECK"}) return false } return true @@ -362,7 +364,7 @@ func (cluster *Cluster) isNotFirstSlave() bool { // - first replication-manager start on no topology // - all cluster down if cluster.master == nil { - cluster.sme.AddState("ERR00026", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00026"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00026", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00026"]), ErrFrom: "CHECK"}) return false } diff --git a/cluster/cluster_fail.go b/cluster/cluster_fail.go index 11948b3b5..73605f7f8 100644 --- a/cluster/cluster_fail.go +++ b/cluster/cluster_fail.go @@ -30,7 +30,7 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { res := cluster.VMasterFailover(fail) return res } - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() // Phase 1: Cleanup and election var err error if fail == false { @@ -50,7 +50,7 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { cluster.LogSQL(logs, err, cluster.master.URL, "MasterFailover", LvlDbg, "CheckLongRunningWrites") if qt > 0 { cluster.LogPrintf(LvlErr, "Long updates running on master. 
Cannot switchover") - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return false } @@ -81,14 +81,14 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { } case <-time.After(time.Second * time.Duration(cluster.Conf.SwitchWaitTrx)): cluster.LogPrintf(LvlErr, "Long running trx on master at least %d, can not switchover ", cluster.Conf.SwitchWaitTrx) - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return false } } else { if cluster.Conf.MultiMasterGrouprep { // group replication auto elect a new master in case of failure do nothing - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return true } cluster.LogPrintf(LvlInfo, "------------------------") @@ -107,7 +107,7 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { } if key == -1 { cluster.LogPrintf(LvlErr, "No candidates found") - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return false } @@ -115,7 +115,7 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { if fail && !cluster.isSlaveElectable(cluster.slaves[key], true) { cluster.LogPrintf(LvlInfo, "Elected slave have issue cancelling failover", cluster.slaves[key].URL) - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return false } // Shuffle the server list @@ -532,7 +532,7 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { cluster.FailoverCtr++ cluster.FailoverTs = time.Now().Unix() } - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() // Not a prefered master this code is not default if cluster.Conf.FailoverSwitchToPrefered && fail == true && cluster.Conf.PrefMaster != "" && !cluster.master.IsPrefered() { @@ -675,7 +675,7 @@ func (cluster *Cluster) electSwitchoverGroupReplicationCandidate(l []*ServerMoni // Return one not ignored not full , not prefered for i, sl := range l { if sl.IsIgnored() { - cluster.sme.AddState("ERR00037", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00037"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00037", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00037"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } if cluster.IsInPreferedHosts(sl) { @@ -703,7 +703,7 @@ func (cluster *Cluster) electSwitchoverCandidate(l []*ServerMonitor, forcingLog /* If server is in the ignore list, do not elect it in switchover */ if sl.IsIgnored() { - cluster.sme.AddState("ERR00037", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00037"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00037", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00037"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } if sl.IsFull { @@ -711,7 +711,7 @@ func (cluster *Cluster) electSwitchoverCandidate(l []*ServerMonitor, forcingLog } //Need comment// if sl.IsRelay { - cluster.sme.AddState("ERR00036", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00036"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00036", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00036"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } if !sl.HasBinlog() && !sl.IsIgnored() { @@ -719,19 +719,19 @@ func (cluster *Cluster) electSwitchoverCandidate(l []*ServerMonitor, forcingLog continue } if cluster.Conf.MultiMaster == true && sl.State == stateMaster 
{ - cluster.sme.AddState("ERR00035", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00035"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00035", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00035"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } // The tests below should run only in case of a switchover as they require the master to be up. if cluster.isSlaveElectableForSwitchover(sl, forcingLog) == false { - cluster.sme.AddState("ERR00034", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00034"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00034", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00034"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } /* binlog + ping */ if cluster.isSlaveElectable(sl, forcingLog) == false { - cluster.sme.AddState("ERR00039", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00039"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00039", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00039"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } @@ -743,14 +743,14 @@ func (cluster *Cluster) electSwitchoverCandidate(l []*ServerMonitor, forcingLog return i } if sl.HaveNoMasterOnStart == true && cluster.Conf.FailRestartUnsafe == false { - cluster.sme.AddState("ERR00084", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00084"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00084", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00084"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } ss, errss := sl.GetSlaveStatus(sl.ReplicationSourceName) // not a slave if errss != nil && cluster.Conf.FailRestartUnsafe == false { //Skip slave in election %s have no master log file, slave might have failed - cluster.sme.AddState("ERR00033", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00033"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00033", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00033"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } // Fake position if none as new slave @@ -844,19 +844,19 @@ func (cluster *Cluster) electFailoverCandidate(l []*ServerMonitor, forcingLog bo //Need comment// if sl.IsRelay { - cluster.sme.AddState("ERR00036", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00036"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00036", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00036"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) continue } if sl.IsFull { continue } if cluster.Conf.MultiMaster == true && sl.State == stateMaster { - cluster.sme.AddState("ERR00035", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00035"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00035", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00035"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) trackposList[i].Ignoredmultimaster = true continue } if sl.HaveNoMasterOnStart == true && cluster.Conf.FailRestartUnsafe == false { - cluster.sme.AddState("ERR00084", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00084"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) 
+ cluster.StateMachine.AddState("ERR00084", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00084"], sl.URL), ServerUrl: sl.URL, ErrFrom: "CHECK"}) continue } if !sl.HasBinlog() && !sl.IsIgnored() { @@ -879,7 +879,7 @@ func (cluster *Cluster) electFailoverCandidate(l []*ServerMonitor, forcingLog bo ss, errss := sl.GetSlaveStatus(sl.ReplicationSourceName) // not a slave if errss != nil && cluster.Conf.FailRestartUnsafe == false { - cluster.sme.AddState("ERR00033", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00033"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00033", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00033"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) trackposList[i].Ignoredreplication = true continue } @@ -934,7 +934,7 @@ func (cluster *Cluster) electFailoverCandidate(l []*ServerMonitor, forcingLog bo } //end loop all slaves if !HaveOneValidReader { - cluster.sme.AddState("ERR00085", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00085"]), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00085", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00085"]), ErrFrom: "CHECK"}) } sort.Slice(trackposList[:], func(i, j int) bool { @@ -1024,7 +1024,7 @@ func (cluster *Cluster) isSlaveElectable(sl *ServerMonitor, forcingLog bool) boo } //if master is alived and IO Thread stops then not a good candidate and not forced if ss.SlaveIORunning.String == "No" && cluster.Conf.RplChecks && !cluster.IsMasterFailed() { - cluster.sme.AddState("ERR00087", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00087"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00087", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00087"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Unsafe failover condition. Slave %s IO Thread is stopped %s. Skipping", sl.URL, ss.LastIOError.String) } @@ -1033,14 +1033,14 @@ func (cluster *Cluster) isSlaveElectable(sl *ServerMonitor, forcingLog bool) boo /* binlog + ping */ if dbhelper.CheckSlavePrerequisites(sl.Conn, sl.Host, sl.DBVersion) == false { - cluster.sme.AddState("ERR00040", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00040"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00040", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00040"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Slave %s does not ping or has no binlogs. Skipping", sl.URL) } return false } if sl.IsMaintenance { - cluster.sme.AddState("ERR00047", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00047"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00047", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00047"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Slave %s is in maintenance. 
Skipping", sl.URL) } @@ -1048,7 +1048,7 @@ func (cluster *Cluster) isSlaveElectable(sl *ServerMonitor, forcingLog bool) boo } if ss.SecondsBehindMaster.Int64 > cluster.Conf.FailMaxDelay && cluster.Conf.FailMaxDelay != -1 && cluster.Conf.RplChecks == true { - cluster.sme.AddState("ERR00041", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00041"]+" Sql: "+sl.GetProcessListReplicationLongQuery(), sl.URL, cluster.Conf.FailMaxDelay, ss.SecondsBehindMaster.Int64), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00041", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00041"]+" Sql: "+sl.GetProcessListReplicationLongQuery(), sl.URL, cluster.Conf.FailMaxDelay, ss.SecondsBehindMaster.Int64), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Unsafe failover condition. Slave %s has more than failover-max-delay %d seconds with replication delay %d. Skipping", sl.URL, cluster.Conf.FailMaxDelay, ss.SecondsBehindMaster.Int64) } @@ -1057,7 +1057,7 @@ func (cluster *Cluster) isSlaveElectable(sl *ServerMonitor, forcingLog bool) boo } if ss.SlaveSQLRunning.String == "No" && cluster.Conf.RplChecks { - cluster.sme.AddState("ERR00042", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00042"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00042", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00042"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Unsafe failover condition. Slave %s SQL Thread is stopped. Skipping", sl.URL) } @@ -1068,13 +1068,13 @@ func (cluster *Cluster) isSlaveElectable(sl *ServerMonitor, forcingLog bool) boo if ss.SlaveIORunning.String == "Connecting" && !cluster.IsMasterFailed() { cluster.LogPrintf(LvlDbg, "isSlaveElect lastIOErrno: %s", ss.LastIOErrno.String) if ss.LastIOErrno.String == "1045" { - cluster.sme.AddState("ERR00088", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00088"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00088", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00088"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) sl.SetReplicationCredentialsRotation(ss) } } if sl.HaveSemiSync && sl.SemiSyncSlaveStatus == false && cluster.Conf.FailSync && cluster.Conf.RplChecks { - cluster.sme.AddState("ERR00043", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00043"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00043", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00043"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Semi-sync slave %s is out of sync. Skipping", sl.URL) } @@ -1097,7 +1097,7 @@ func (cluster *Cluster) isSlaveValidReader(sl *ServerMonitor, forcingLog bool) b } if sl.IsMaintenance { - cluster.sme.AddState("ERR00047", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00047"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00047", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00047"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Slave %s is in maintenance. 
Skipping", sl.URL) } @@ -1105,7 +1105,7 @@ func (cluster *Cluster) isSlaveValidReader(sl *ServerMonitor, forcingLog bool) b } /*if ss.SecondsBehindMaster.Int64 > cluster.Conf.FailMaxDelay && cluster.Conf.FailMaxDelay != -1 { - cluster.sme.AddState("ERR00041", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00041"]+" Sql: "+sl.GetProcessListReplicationLongQuery(), sl.URL, cluster.Conf.FailMaxDelay, ss.SecondsBehindMaster.Int64), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00041", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00041"]+" Sql: "+sl.GetProcessListReplicationLongQuery(), sl.URL, cluster.Conf.FailMaxDelay, ss.SecondsBehindMaster.Int64), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Unsafe failover condition. Slave %s has more than failover-max-delay %d seconds with replication delay %d. Skipping", sl.URL, cluster.Conf.FailMaxDelay, ss.SecondsBehindMaster.Int64) } @@ -1113,7 +1113,7 @@ func (cluster *Cluster) isSlaveValidReader(sl *ServerMonitor, forcingLog bool) b return false } if sl.HaveSemiSync && sl.SemiSyncSlaveStatus == false && cluster.Conf.FailSync && cluster.Conf.RplChecks { - cluster.sme.AddState("ERR00043", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00043"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00043", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00043"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Semi-sync slave %s is out of sync. Skipping", sl.URL) } @@ -1121,7 +1121,7 @@ func (cluster *Cluster) isSlaveValidReader(sl *ServerMonitor, forcingLog bool) b } */ if ss.SlaveSQLRunning.String == "No" { - cluster.sme.AddState("ERR00042", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00042"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) + cluster.StateMachine.AddState("ERR00042", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00042"], sl.URL), ErrFrom: "CHECK", ServerUrl: sl.URL}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Unsafe failover condition. Slave %s SQL Thread is stopped. Skipping", sl.URL) } @@ -1148,7 +1148,7 @@ func (cluster *Cluster) foundPreferedMaster(l []*ServerMonitor) *ServerMonitor { // VMasterFailover triggers a leader change and returns the new master URL when all possible leader multimaster ring or galera func (cluster *Cluster) VMasterFailover(fail bool) bool { - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() // Phase 1: Cleanup and election var err error cluster.oldMaster = cluster.vmaster @@ -1169,7 +1169,7 @@ func (cluster *Cluster) VMasterFailover(fail bool) bool { cluster.LogSQL(logs, err, cluster.vmaster.URL, "MasterFailover", LvlDbg, "CheckLongRunningWrites") if qt > 0 { cluster.LogPrintf(LvlErr, "Long updates running on virtual master. 
Cannot switchover") - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return false } @@ -1190,7 +1190,7 @@ func (cluster *Cluster) VMasterFailover(fail bool) bool { } case <-time.After(time.Second * time.Duration(cluster.Conf.SwitchWaitTrx)): cluster.LogPrintf(LvlErr, "Long running trx on master at least %d, can not switchover ", cluster.Conf.SwitchWaitTrx) - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return false } cluster.master = cluster.vmaster @@ -1217,7 +1217,7 @@ func (cluster *Cluster) VMasterFailover(fail bool) bool { } if key == -1 { cluster.LogPrintf(LvlErr, "No candidates found") - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return false } cluster.LogPrintf(LvlInfo, "Server %s has been elected as a new master", cluster.slaves[key].URL) @@ -1242,7 +1242,7 @@ func (cluster *Cluster) VMasterFailover(fail bool) bool { } if !fail && cluster.Conf.MultiMasterGrouprep { result, errswitch := cluster.slaves[key].SetGroupReplicationPrimary() - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() if errswitch == nil { cluster.LogPrintf(LvlInfo, "Server %s elected as new leader %s", cluster.slaves[key].URL, result) @@ -1368,7 +1368,7 @@ func (cluster *Cluster) VMasterFailover(fail bool) bool { } cluster.master = nil - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return true } @@ -1377,7 +1377,7 @@ func (cluster *Cluster) electVirtualCandidate(oldMaster *ServerMonitor, forcingL for i, sl := range cluster.Servers { /* If server is in the ignore list, do not elect it */ if sl.IsIgnored() { - cluster.sme.AddState("ERR00037", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00037"], sl.URL), ErrFrom: "CHECK"}) + cluster.StateMachine.AddState("ERR00037", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00037"], sl.URL), ErrFrom: "CHECK"}) if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlDbg, "%s is in the ignore list. 
Skipping", sl.URL) } diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index f00d245e7..d9b4ca4d5 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -134,8 +134,8 @@ func (cluster *Cluster) GetPersitentState() error { } cluster.SLAHistory = clsave.SLAHistory cluster.Crashes = clsave.Crashes - cluster.sme.SetSla(clsave.SLA) - cluster.sme.SetMasterUpAndSyncRestart() + cluster.StateMachine.SetSla(clsave.SLA) + cluster.StateMachine.SetMasterUpAndSyncRestart() return nil } @@ -180,7 +180,7 @@ func (cluster *Cluster) GetWaitTrx() int64 { } func (cluster *Cluster) GetStateMachine() *state.StateMachine { - return cluster.sme + return cluster.StateMachine } func (cluster *Cluster) GetMasterFailCount() int { @@ -281,7 +281,7 @@ func (cluster *Cluster) GetDbPass() string { } func (cluster *Cluster) GetStatus() bool { - return cluster.sme.IsFailable() + return cluster.StateMachine.IsFailable() } func (cluster *Cluster) GetGroupReplicationWhiteList() string { diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 06c5015d0..a47ff1545 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -52,11 +52,11 @@ func (cluster *Cluster) HasSchedulerEntry(myname string) bool { func (cluster *Cluster) HasNoValidSlave() bool { //All slave stopped - if cluster.sme.IsInState("ERR00010") { + if cluster.StateMachine.IsInState("ERR00010") { return true } // Any issues on all slaves expeting delay and network - if cluster.sme.IsInState("ERR00085") { + if cluster.StateMachine.IsInState("ERR00085") { return true } return false @@ -324,11 +324,11 @@ func (cluster *Cluster) IsVerbose() bool { } func (cluster *Cluster) IsInFailover() bool { - return cluster.sme.IsInFailover() + return cluster.StateMachine.IsInFailover() } func (cluster *Cluster) IsDiscovered() bool { - return cluster.sme.IsDiscovered() + return cluster.StateMachine.IsDiscovered() } func (cluster *Cluster) IsMultiMaster() bool { diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go index 2f6f2f416..7b822554c 100644 --- a/cluster/cluster_set.go +++ b/cluster/cluster_set.go @@ -30,14 +30,14 @@ import ( func (cluster *Cluster) SetStatus() { if cluster.master == nil { - cluster.sme.SetMasterUpAndSync(false, false, false) + cluster.StateMachine.SetMasterUpAndSync(false, false, false) } else { - cluster.sme.SetMasterUpAndSync(!cluster.master.IsDown(), cluster.master.SemiSyncMasterStatus, cluster.master.HaveHealthyReplica) + cluster.StateMachine.SetMasterUpAndSync(!cluster.master.IsDown(), cluster.master.SemiSyncMasterStatus, cluster.master.HaveHealthyReplica) } cluster.Uptime = cluster.GetStateMachine().GetUptime() cluster.UptimeFailable = cluster.GetStateMachine().GetUptimeFailable() cluster.UptimeSemiSync = cluster.GetStateMachine().GetUptimeSemiSync() - cluster.IsNotMonitoring = cluster.sme.IsInFailover() + cluster.IsNotMonitoring = cluster.StateMachine.IsInFailover() cluster.IsCapturing = cluster.IsInCaptureMode() cluster.MonitorSpin = fmt.Sprintf("%d ", cluster.GetStateMachine().GetHeartbeats()) cluster.IsProvision = cluster.IsProvisioned() @@ -733,8 +733,8 @@ func (cluster *Cluster) SetBackupPhysicalType(backup string) { func (cluster *Cluster) SetEmptySla() { cluster.LogPrintf(LvlInfo, "Rotate SLA") - cluster.SLAHistory = append(cluster.SLAHistory, cluster.sme.GetSla()) - cluster.sme.ResetUptime() + cluster.SLAHistory = append(cluster.SLAHistory, cluster.StateMachine.GetSla()) + cluster.StateMachine.ResetUptime() } func (cluster *Cluster) SetDbServersMonitoringCredential(credential string) { @@ 
-845,7 +845,7 @@ func (cluster *Cluster) SetReplicationCredential(credential string) { } func (cluster *Cluster) SetUnDiscovered() { - cluster.sme.UnDiscovered() + cluster.StateMachine.UnDiscovered() cluster.Topology = topoUnknown } @@ -874,7 +874,7 @@ func (cluster *Cluster) SetClusterList(clusters map[string]*Cluster) { func (cluster *Cluster) SetState(key string, s state.State) { if !strings.Contains(cluster.Conf.MonitorIgnoreError, key) { - cluster.sme.AddState(key, s) + cluster.StateMachine.AddState(key, s) } } @@ -973,7 +973,7 @@ func (cluster *Cluster) SetServicePlan(theplan string) error { if err != nil { cluster.LogPrintf(LvlErr, "SetServicePlan : Fail SetDbServerHosts : %s, for hosts : %s", err, strings.Join(hosts, ",")) } - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() err = cluster.newServerList() if err != nil { cluster.LogPrintf(LvlErr, "SetServicePlan : Fail newServerList : %s", err) @@ -982,7 +982,7 @@ func (cluster *Cluster) SetServicePlan(theplan string) error { wg.Add(1) go cluster.TopologyDiscover(wg) wg.Wait() - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() cluster.Conf.ProxysqlOn = true cluster.Conf.ProxysqlHosts = "" cluster.Conf.MdbsProxyOn = true diff --git a/cluster/cluster_topo.go b/cluster/cluster_topo.go index 103ccb465..0e8a8e53d 100644 --- a/cluster/cluster_topo.go +++ b/cluster/cluster_topo.go @@ -149,7 +149,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error { wg.Wait() // cluster.pingServerList() - if cluster.sme.IsInFailover() { + if cluster.StateMachine.IsInFailover() { cluster.LogPrintf(LvlDbg, "In Failover skip topology detection") return errors.New("In Failover skip topology detection") } @@ -414,7 +414,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error { } - if cluster.sme.CanMonitor() { + if cluster.StateMachine.CanMonitor() { return nil } return errors.New("Error found in State Machine Engine") diff --git a/cluster/cluster_wait.go b/cluster/cluster_wait.go index b42ba5bf6..e2a8e015a 100644 --- a/cluster/cluster_wait.go +++ b/cluster/cluster_wait.go @@ -19,7 +19,7 @@ import ( ) func (cluster *Cluster) WaitFailoverEndState() { - for cluster.sme.IsInFailover() { + for cluster.StateMachine.IsInFailover() { time.Sleep(time.Second) cluster.LogPrintf(LvlInfo, "Waiting for failover stopped.") } @@ -121,7 +121,7 @@ func (cluster *Cluster) WaitClusterStop() error { cluster.LogPrintf(LvlInfo, "Waiting for cluster shutdown") exitloop++ // All cluster down - if cluster.sme.IsInState("ERR00021") == true { + if cluster.StateMachine.IsInState("ERR00021") == true { exitloop = 9999999 } if cluster.HasAllDbDown() { @@ -254,7 +254,7 @@ func (cluster *Cluster) WaitBootstrapDiscovery() error { case <-ticker.C: cluster.LogPrintf(LvlInfo, "Waiting Bootstrap and discovery") exitloop++ - if cluster.sme.IsDiscovered() { + if cluster.StateMachine.IsDiscovered() { exitloop = 9999999 } diff --git a/cluster/prov.go b/cluster/prov.go index 3f96c81b4..35ee74b5b 100644 --- a/cluster/prov.go +++ b/cluster/prov.go @@ -53,7 +53,7 @@ func (cluster *Cluster) Bootstrap() error { func (cluster *Cluster) ProvisionServices() error { - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() // delete the cluster state here path := cluster.WorkingDir + ".json" os.Remove(path) @@ -132,14 +132,14 @@ func (cluster *Cluster) ProvisionServices() error { } } - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return nil } func (cluster *Cluster) 
InitDatabaseService(server *ServerMonitor) error { - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() switch cluster.GetOrchestrator() { case config.ConstOrchestratorOpenSVC: go cluster.OpenSVCProvisionDatabaseService(server) @@ -152,13 +152,13 @@ func (cluster *Cluster) InitDatabaseService(server *ServerMonitor) error { case config.ConstOrchestratorOnPremise: go cluster.OnPremiseProvisionDatabaseService(server) default: - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return nil } cluster.ProvisionDatabaseScript(server) select { case err := <-cluster.errorChan: - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() if err == nil { server.SetProvisionCookie() } else { @@ -187,7 +187,7 @@ func (cluster *Cluster) InitProxyService(prx DatabaseProxy) error { cluster.ProvisionProxyScript(prx) select { case err := <-cluster.errorChan: - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() if err == nil { prx.SetProvisionCookie() } else { @@ -199,7 +199,7 @@ func (cluster *Cluster) InitProxyService(prx DatabaseProxy) error { func (cluster *Cluster) Unprovision() error { - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() for _, server := range cluster.Servers { switch cluster.GetOrchestrator() { case config.ConstOrchestratorOpenSVC: @@ -455,7 +455,7 @@ func (cluster *Cluster) StartAllNodes() error { func (cluster *Cluster) BootstrapReplicationCleanup() error { cluster.LogPrintf(LvlInfo, "Cleaning up replication on existing servers") - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() for _, server := range cluster.Servers { err := server.Refresh() if err != nil { @@ -500,7 +500,7 @@ func (cluster *Cluster) BootstrapReplicationCleanup() error { cluster.master = nil cluster.vmaster = nil cluster.slaves = nil - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return nil } @@ -534,17 +534,17 @@ func (cluster *Cluster) BootstrapReplication(clean bool) error { return errors.New("Environment already has an existing master/slave setup") } - cluster.sme.SetFailoverState() + cluster.StateMachine.SetFailoverState() masterKey := 0 if cluster.Conf.PrefMaster != "" { masterKey = func() int { for k, server := range cluster.Servers { if server.IsPrefered() { - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return k } } - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return -1 }() } @@ -601,14 +601,14 @@ func (cluster *Cluster) BootstrapReplication(clean bool) error { if relaykey == key { err = server.ChangeMasterTo(cluster.Servers[masterKey], "CURRENT_POS") if err != nil { - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return err } } else { err = server.ChangeMasterTo(cluster.Servers[relaykey], "CURRENT_POS") if err != nil { - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return err } } @@ -629,7 +629,7 @@ func (cluster *Cluster) BootstrapReplication(clean bool) error { if key == 0 { err = server.ChangeMasterTo(cluster.Servers[1], "CURRENT_POS") if err != nil { - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return err } if !server.ClusterGroup.IsInIgnoredReadonly(server) { @@ -639,7 +639,7 @@ func (cluster *Cluster) BootstrapReplication(clean bool) error { if key == 1 { err = server.ChangeMasterTo(cluster.Servers[0], "CURRENT_POS") if err != nil { - 
cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return err } } @@ -657,7 +657,7 @@ func (cluster *Cluster) BootstrapReplication(clean bool) error { i := (len(cluster.Servers) + key - 1) % len(cluster.Servers) err = server.ChangeMasterTo(cluster.Servers[i], "SLAVE_POS") if err != nil { - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() return err } @@ -665,7 +665,7 @@ func (cluster *Cluster) BootstrapReplication(clean bool) error { } } - cluster.sme.RemoveFailoverState() + cluster.StateMachine.RemoveFailoverState() // speed up topology discovery wg.Add(1) cluster.TopologyDiscover(wg) diff --git a/cluster/prov_opensvc.go b/cluster/prov_opensvc.go index 69ea60456..f81bddf79 100644 --- a/cluster/prov_opensvc.go +++ b/cluster/prov_opensvc.go @@ -150,10 +150,10 @@ func (cluster *Cluster) OpenSVCWaitDequeue(svc opensvc.Collector, idaction int) time.Sleep(2 * time.Second) status := svc.GetActionStatus(strconv.Itoa(idaction)) if status == "Q" { - cluster.sme.AddState("WARN0045", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0045"]), ErrFrom: "TOPO"}) + cluster.StateMachine.AddState("WARN0045", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0045"]), ErrFrom: "TOPO"}) } if status == "W" { - cluster.sme.AddState("WARN0046", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0046"]), ErrFrom: "TOPO"}) + cluster.StateMachine.AddState("WARN0046", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0046"]), ErrFrom: "TOPO"}) } if status == "T" { return nil diff --git a/cluster/prx.go b/cluster/prx.go index cbe356ccb..08161fde4 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -238,7 +238,7 @@ func (cluster *Cluster) InjectProxiesTraffic() { } db, err := pr.GetClusterConnection() if err != nil { - cluster.sme.AddState("ERR00050", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00050"], err), ErrFrom: "TOPO"}) + cluster.StateMachine.AddState("ERR00050", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00050"], err), ErrFrom: "TOPO"}) } else { if pr.GetType() == config.ConstProxyMyProxy { definer = "DEFINER = root@localhost" @@ -248,7 +248,7 @@ func (cluster *Cluster) InjectProxiesTraffic() { _, err := db.Exec("CREATE OR REPLACE " + definer + " VIEW replication_manager_schema.pseudo_gtid_v as select '" + misc.GetUUID() + "' from dual") if err != nil { - cluster.sme.AddState("ERR00050", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00050"], err), ErrFrom: "TOPO"}) + cluster.StateMachine.AddState("ERR00050", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00050"], err), ErrFrom: "TOPO"}) db.Exec("CREATE DATABASE IF NOT EXISTS replication_manager_schema") } diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index 8e3b1eeeb..8b1e4f65f 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -204,12 +204,12 @@ func (proxy *HaproxyProxy) Refresh() error { req, err := http.NewRequest("GET", url, nil) if err != nil { - cluster.sme.AddState("ERR00052", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00052"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00052", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00052"], err), ErrFrom: "MON"}) return err } resp, err := client.Do(req) if err != nil { - cluster.sme.AddState("ERR00052", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00052"], err), 
ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00052", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00052"], err), ErrFrom: "MON"}) return err } defer resp.Body.Close() @@ -236,7 +236,7 @@ func (proxy *HaproxyProxy) Refresh() error { showleaderstate, err := haRuntime.ApiCmd(cmd) if err != nil { - cluster.sme.AddState("ERR00052", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00052"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00052", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00052"], err), ErrFrom: "MON"}) return err } @@ -274,7 +274,7 @@ func (proxy *HaproxyProxy) Refresh() error { result, err := haRuntime.ApiCmd("show stat") if err != nil { - cluster.sme.AddState("ERR00052", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00052"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00052", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00052"], err), ErrFrom: "MON"}) return err } if cluster.Conf.HaproxyDebug { @@ -296,7 +296,7 @@ func (proxy *HaproxyProxy) Refresh() error { return err } if len(line) < 73 { - cluster.sme.AddState("WARN0078", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0078"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("WARN0078", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0078"], err), ErrFrom: "MON"}) return errors.New(clusterError["WARN0078"]) } if strings.Contains(strings.ToLower(line[0]), "write") { diff --git a/cluster/prx_mariadbshardproxy.go b/cluster/prx_mariadbshardproxy.go index 93006e5c8..c73b24ce7 100644 --- a/cluster/prx_mariadbshardproxy.go +++ b/cluster/prx_mariadbshardproxy.go @@ -161,7 +161,7 @@ func (cluster *Cluster) CheckMdbShardServersSchema(proxy *MariadbShardProxy) { } schemas, _, err := cluster.master.GetSchemas() if err != nil { - cluster.sme.AddState("WARN0089", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(cluster.GetErrorList()["WARN0089"], cluster.master.URL), ErrFrom: "PROXY", ServerUrl: cluster.master.URL}) + cluster.StateMachine.AddState("WARN0089", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(cluster.GetErrorList()["WARN0089"], cluster.master.URL), ErrFrom: "PROXY", ServerUrl: cluster.master.URL}) return } foundReplicationManagerSchema := false @@ -205,7 +205,7 @@ func (proxy *MariadbShardProxy) CertificatesReload() error { func (proxy *MariadbShardProxy) Refresh() error { if proxy.ShardProxy == nil { //proxy.ClusterGroup.LogPrintf(LvlErr, "Sharding proxy refresh no database monitor yet initialize") - proxy.ClusterGroup.sme.AddState("ERR00086", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(proxy.ClusterGroup.GetErrorList()["ERR00086"]), ErrFrom: "PROXY", ServerUrl: proxy.GetURL()}) + proxy.ClusterGroup.StateMachine.AddState("ERR00086", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(proxy.ClusterGroup.GetErrorList()["ERR00086"]), ErrFrom: "PROXY", ServerUrl: proxy.GetURL()}) return errors.New("Sharding proxy refresh no database monitor yet initialize") } wg := new(sync.WaitGroup) diff --git a/cluster/prx_maxscale.go b/cluster/prx_maxscale.go index 1e19fe5cd..c2e32b868 100644 --- a/cluster/prx_maxscale.go +++ b/cluster/prx_maxscale.go @@ -97,8 +97,8 @@ func (proxy *MaxscaleProxy) Refresh() error { if cluster.Conf.MxsOn { err := m.Connect() if err != nil { - cluster.sme.AddState("ERR00018", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00018"], err), ErrFrom: "CONF"}) - 
cluster.sme.CopyOldStateFromUnknowServer(proxy.Name) + cluster.StateMachine.AddState("ERR00018", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00018"], err), ErrFrom: "CONF"}) + cluster.StateMachine.CopyOldStateFromUnknowServer(proxy.Name) return err } } @@ -115,7 +115,7 @@ func (proxy *MaxscaleProxy) Refresh() error { if cluster.Conf.MxsGetInfoMethod == "maxinfo" { _, err := m.GetMaxInfoServers("http://" + proxy.Host + ":" + strconv.Itoa(cluster.Conf.MxsMaxinfoPort) + "/servers") if err != nil { - cluster.sme.AddState("ERR00020", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00020"], server.URL), ErrFrom: "MON", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00020", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00020"], server.URL), ErrFrom: "MON", ServerUrl: proxy.Name}) } srvport, _ := strconv.Atoi(server.Port) mxsConnections := 0 @@ -127,7 +127,7 @@ func (proxy *MaxscaleProxy) Refresh() error { } else { _, err := m.ListServers() if err != nil { - server.ClusterGroup.sme.AddState("ERR00019", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00019"], server.URL), ErrFrom: "MON", ServerUrl: proxy.Name}) + server.ClusterGroup.StateMachine.AddState("ERR00019", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00019"], server.URL), ErrFrom: "MON", ServerUrl: proxy.Name}) } else { if proxy.Tunnel { @@ -206,7 +206,7 @@ func (proxy *MaxscaleProxy) Init() { cluster.LogPrintf(LvlErr, "MaxScale client could not shutdown monitor:%s", err) } } else { - cluster.sme.AddState("ERR00017", state.State{ErrType: "ERROR", ErrDesc: clusterError["ERR00017"], ErrFrom: "TOPO", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00017", state.State{ErrType: "ERROR", ErrDesc: clusterError["ERR00017"], ErrFrom: "TOPO", ServerUrl: proxy.Name}) } err = m.SetServer(cluster.GetMaster().MxsServerName, "master") @@ -276,7 +276,7 @@ func (pr *MaxscaleProxy) SetMaintenance(server *ServerMonitor) { m := maxscale.MaxScale{Host: pr.Host, Port: pr.Port, User: pr.User, Pass: pr.Pass} err := m.Connect() if err != nil { - cluster.sme.AddState("ERR00018", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00018"], err), ErrFrom: "CONF"}) + cluster.StateMachine.AddState("ERR00018", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00018"], err), ErrFrom: "CONF"}) } if server.IsMaintenance { err = m.SetServer(server.MxsServerName, "maintenance") diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index 4346434b4..a463a5892 100644 --- a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -116,7 +116,7 @@ func (proxy *ProxySQLProxy) AddShardProxy(shardproxy *MariadbShardProxy) { } psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return } defer psql.Connection.Close() @@ -130,7 +130,7 @@ func (proxy *ProxySQLProxy) AddQueryRulesProxysql(rules []proxysql.QueryRule) er } psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return 
err } defer psql.Connection.Close() @@ -146,7 +146,7 @@ func (proxy *ProxySQLProxy) Init() { psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return } defer psql.Connection.Close() @@ -207,7 +207,7 @@ func (proxy *ProxySQLProxy) CertificatesReload() error { cluster := proxy.ClusterGroup psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return err } defer psql.Connection.Close() @@ -223,7 +223,7 @@ func (proxy *ProxySQLProxy) Failover() { cluster := proxy.ClusterGroup psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return } @@ -267,8 +267,8 @@ func (proxy *ProxySQLProxy) Refresh() error { psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) - cluster.sme.CopyOldStateFromUnknowServer(proxy.Name) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.CopyOldStateFromUnknowServer(proxy.Name) return err } defer psql.Connection.Close() @@ -333,7 +333,7 @@ func (proxy *ProxySQLProxy) Refresh() error { } err = psql.SetReader(misc.Unbracket(s.Host), s.Port) if err != nil { - cluster.sme.AddState("ERR00069", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00069"], s.URL, err), ErrFrom: "PRX", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00069", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00069"], s.URL, err), ErrFrom: "PRX", ServerUrl: proxy.Name}) } updated = true } @@ -351,7 +351,7 @@ func (proxy *ProxySQLProxy) Refresh() error { cluster.LogPrintf(LvlInfo, "Monitor ProxySQL setting writer offline standalone server %s", s.URL) err = psql.SetOffline(misc.Unbracket(s.Host), s.Port) if err != nil { - cluster.sme.AddState("ERR00070", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00070"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00070", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00070"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name}) } updated = true @@ -361,7 +361,7 @@ func (proxy *ProxySQLProxy) Refresh() error { cluster.LogPrintf(LvlInfo, "Monitor ProxySQL setting reader offline standalone server %s", s.URL) err = psql.SetOfflineSoft(misc.Unbracket(s.Host), s.Port) if err != nil { - cluster.sme.AddState("ERR00070", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00070"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00070", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00070"], err, s.URL), ErrFrom: 
"PRX", ServerUrl: proxy.Name}) } updated = true @@ -369,7 +369,7 @@ func (proxy *ProxySQLProxy) Refresh() error { // if the master comes back from a previously failed or standalone state, reintroduce it in // the appropriate HostGroup - cluster.sme.AddState("ERR00071", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00071"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00071", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00071"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name}) if psql.ExistAsWriterOrOffline(misc.Unbracket(s.Host), s.Port) { err = psql.SetOnline(misc.Unbracket(s.Host), s.Port) if err != nil { @@ -410,7 +410,7 @@ func (proxy *ProxySQLProxy) Refresh() error { cluster.LogPrintf(LvlInfo, "Monitor ProxySQL setting reader standalone server %s", s.URL) } if err != nil { - cluster.sme.AddState("ERR00072", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00072"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00072", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00072"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name}) } updated = true } else if s.IsSlaveOrSync() && !isFoundBackendRead && !s.IsIgnored() { @@ -423,7 +423,7 @@ func (proxy *ProxySQLProxy) Refresh() error { if s.IsMaster() && cluster.Conf.ProxysqlCopyGrants { myprxusermap, _, err := dbhelper.GetProxySQLUsers(psql.Connection) if err != nil { - cluster.sme.AddState("ERR00053", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00053"], err), ErrFrom: "MON", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00053", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00053"], err), ErrFrom: "MON", ServerUrl: proxy.Name}) } uniUsers := make(map[string]dbhelper.Grant) dupUsers := make(map[string]string) @@ -432,7 +432,7 @@ func (proxy *ProxySQLProxy) Refresh() error { user, ok := uniUsers[u.User+":"+u.Password] if ok { dupUsers[user.User] = user.User - cluster.sme.AddState("ERR00057", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00057"], user.User), ErrFrom: "MON", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00057", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00057"], user.User), ErrFrom: "MON", ServerUrl: proxy.Name}) } else { if u.Password != "" && u.Password != "invalid" { if u.User != cluster.dbUser { @@ -450,7 +450,7 @@ func (proxy *ProxySQLProxy) Refresh() error { cluster.LogPrintf(LvlInfo, "Add ProxySQL user %s ", user.User) err := psql.AddUser(user.User, user.Password) if err != nil { - cluster.sme.AddState("ERR00054", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00054"], err), ErrFrom: "MON", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("ERR00054", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00054"], err), ErrFrom: "MON", ServerUrl: proxy.Name}) } else { changedUser = true } @@ -472,11 +472,11 @@ func (proxy *ProxySQLProxy) Refresh() error { } proxy.QueryRules, err = psql.GetQueryRulesRuntime() if err != nil { - cluster.sme.AddState("WARN0092", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0092"], err), ErrFrom: "MON", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("WARN0092", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0092"], err), ErrFrom: "MON", ServerUrl: proxy.Name}) } proxy.Variables, err = 
psql.GetVariables() if err != nil { - cluster.sme.AddState("WARN0098", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0098"], err), ErrFrom: "MON", ServerUrl: proxy.Name}) + cluster.StateMachine.AddState("WARN0098", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0098"], err), ErrFrom: "MON", ServerUrl: proxy.Name}) } if proxy.ClusterGroup.Conf.ProxysqlBootstrapVariables { if proxy.Variables["MYSQL-MULTIPLEXING"] == "TRUE" && !proxy.ClusterGroup.Conf.ProxysqlMultiplexing { @@ -538,7 +538,7 @@ func (proxy *ProxySQLProxy) SetMaintenance(s *ServerMonitor) { psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return } defer psql.Connection.Close() @@ -564,7 +564,7 @@ func (proxy *ProxySQLProxy) RotateMonitoringPasswords(password string) { cluster := proxy.ClusterGroup psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return } defer psql.Connection.Close() @@ -604,7 +604,7 @@ func (proxy *ProxySQLProxy) RotationAdminPasswords(password string) { cluster := proxy.ClusterGroup psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return } defer psql.Connection.Close() @@ -630,7 +630,7 @@ func (proxy *ProxySQLProxy) Shutdown() { cluster := proxy.ClusterGroup psql, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return } defer psql.Connection.Close() diff --git a/cluster/prx_sphinx.go b/cluster/prx_sphinx.go index 1283be150..86740d48b 100644 --- a/cluster/prx_sphinx.go +++ b/cluster/prx_sphinx.go @@ -1,7 +1,9 @@ // replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL // Copyright 2017-2021 SIGNAL18 CLOUD SAS // Authors: Guillaume Lefranc -// Stephane Varoqui +// +// Stephane Varoqui +// // This source code is licensed under the GNU General Public License, version 3. // Redistribution/Reuse of this code is permitted under the GNU v3 license, as // an additional term, ALL code must carry the original Author(s) credit in comment form. 
@@ -83,7 +85,7 @@ func (proxy *SphinxProxy) Init() { sphinx, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00058", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00058"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00058", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00058"], err), ErrFrom: "MON"}) return } defer sphinx.Connection.Close() @@ -102,7 +104,7 @@ func (proxy *SphinxProxy) Refresh() error { sphinx, err := proxy.Connect() if err != nil { - cluster.sme.AddState("ERR00058", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00058"], err), ErrFrom: "MON"}) + cluster.StateMachine.AddState("ERR00058", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00058"], err), ErrFrom: "MON"}) return err } defer sphinx.Connection.Close() diff --git a/cluster/srv.go b/cluster/srv.go index 4f41095b6..3441c3bdf 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -330,7 +330,7 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { if err != nil { // Copy the last known server states or they will be cleared at next monitoring loop if server.State != stateFailed { - server.ClusterGroup.sme.CopyOldStateFromUnknowServer(server.URL) + server.ClusterGroup.StateMachine.CopyOldStateFromUnknowServer(server.URL) } // server.ClusterGroup.LogPrintf(LvlDbg, "Failure detection handling for server %s %s", server.URL, err) // server.ClusterGroup.LogPrintf(LvlErr, "Failure detection handling for server %s %s", server.URL, err) @@ -356,7 +356,7 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { server.ClusterGroup.LogPrintf(LvlDbg, "Master not defined") } if server.ClusterGroup.GetMaster() != nil && server.URL == server.ClusterGroup.GetMaster().URL && server.GetCluster().GetTopology() != topoUnknown { - server.FailSuspectHeartbeat = server.ClusterGroup.sme.GetHeartbeats() + server.FailSuspectHeartbeat = server.ClusterGroup.StateMachine.GetHeartbeats() if server.ClusterGroup.GetMaster().FailCount <= server.ClusterGroup.Conf.MaxFail { server.ClusterGroup.LogPrintf("INFO", "Master Failure detected! 
Retry %d/%d", server.ClusterGroup.master.FailCount, server.ClusterGroup.Conf.MaxFail) } @@ -435,7 +435,7 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { return } // We will leave when in failover to avoid refreshing variables and status - if server.ClusterGroup.sme.IsInFailover() { + if server.ClusterGroup.StateMachine.IsInFailover() { // conn.Close() server.ClusterGroup.LogPrintf(LvlDbg, "Inside failover, skiping refresh") return @@ -451,7 +451,7 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { defer conn.Close() // Reset FailCount - if (server.State != stateFailed && server.State != stateErrorAuth && server.State != stateSuspect) && (server.FailCount > 0) /*&& (((server.ClusterGroup.sme.GetHeartbeats() - server.FailSuspectHeartbeat) * server.ClusterGroup.Conf.MonitoringTicker) > server.ClusterGroup.Conf.FailResetTime)*/ { + if (server.State != stateFailed && server.State != stateErrorAuth && server.State != stateSuspect) && (server.FailCount > 0) /*&& (((server.ClusterGroup.StateMachine.GetHeartbeats() - server.FailSuspectHeartbeat) * server.ClusterGroup.Conf.MonitoringTicker) > server.ClusterGroup.Conf.FailResetTime)*/ { server.FailCount = 0 server.FailSuspectHeartbeat = 0 } @@ -687,19 +687,19 @@ func (server *ServerMonitor) Refresh() error { if err != nil { server.ClusterGroup.SetState("ERR00073", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00073"], server.URL), ErrFrom: "MON"}) } - if server.ClusterGroup.sme.GetHeartbeats()%30 == 0 { + if server.ClusterGroup.StateMachine.GetHeartbeats()%30 == 0 { server.SaveInfos() if server.GetCluster().GetTopology() != topoActivePassive && server.GetCluster().GetTopology() != topoMultiMasterWsrep { server.CheckPrivileges() } } else { - server.ClusterGroup.sme.PreserveState("ERR00007") - server.ClusterGroup.sme.PreserveState("ERR00006") - server.ClusterGroup.sme.PreserveState("ERR00008") - server.ClusterGroup.sme.PreserveState("ERR00015") - server.ClusterGroup.sme.PreserveState("ERR00078") - server.ClusterGroup.sme.PreserveState("ERR00009") + server.ClusterGroup.StateMachine.PreserveState("ERR00007") + server.ClusterGroup.StateMachine.PreserveState("ERR00006") + server.ClusterGroup.StateMachine.PreserveState("ERR00008") + server.ClusterGroup.StateMachine.PreserveState("ERR00015") + server.ClusterGroup.StateMachine.PreserveState("ERR00078") + server.ClusterGroup.StateMachine.PreserveState("ERR00009") } if server.ClusterGroup.Conf.FailEventScheduler && server.IsMaster() && !server.HasEventScheduler() { server.ClusterGroup.LogPrintf(LvlInfo, "Enable Event Scheduler on master") @@ -861,7 +861,7 @@ func (server *ServerMonitor) Refresh() error { } // monitor plugins if !server.DBVersion.IsPPostgreSQL() { - if server.ClusterGroup.sme.GetHeartbeats()%60 == 0 { + if server.ClusterGroup.StateMachine.GetHeartbeats()%60 == 0 { if server.ClusterGroup.Conf.MonitorPlugins { server.Plugins, logs, err = dbhelper.GetPlugins(server.Conn, server.DBVersion) server.HaveMetaDataLocksLog = server.HasInstallPlugin("METADATA_LOCK_INFO") diff --git a/cluster/srv_chk.go b/cluster/srv_chk.go index 34ad6f18b..01bb68bb6 100644 --- a/cluster/srv_chk.go +++ b/cluster/srv_chk.go @@ -24,14 +24,14 @@ func (server *ServerMonitor) CheckMaxConnections() { maxCx, _ := strconv.ParseInt(server.Variables["MAX_CONNECTIONS"], 10, 64) curCx, _ := strconv.ParseInt(server.Status["THREADS_CONNECTED"], 10, 64) if curCx > maxCx*80/100 { - server.ClusterGroup.sme.AddState("ERR00076", state.State{ErrType: LvlWarn, ErrDesc: 
fmt.Sprintf(clusterError["ERR00076"], server.URL), ErrFrom: "MON", ServerUrl: server.URL}) + server.ClusterGroup.StateMachine.AddState("ERR00076", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["ERR00076"], server.URL), ErrFrom: "MON", ServerUrl: server.URL}) } } func (server *ServerMonitor) CheckVersion() { if server.DBVersion.IsMariaDB() && ((server.DBVersion.Major == 10 && server.DBVersion.Minor == 4 && server.DBVersion.Release < 12) || (server.DBVersion.Major == 10 && server.DBVersion.Minor == 5 && server.DBVersion.Release < 1)) { - server.ClusterGroup.sme.AddState("WARN0099", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0099"], server.URL), ErrFrom: "MON", ServerUrl: server.URL}) + server.ClusterGroup.StateMachine.AddState("WARN0099", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0099"], server.URL), ErrFrom: "MON", ServerUrl: server.URL}) } } @@ -40,7 +40,7 @@ func (server *ServerMonitor) CheckVersion() { func (server *ServerMonitor) CheckDisks() { for _, d := range server.Disks { if d.Used/d.Total*100 > int32(server.ClusterGroup.Conf.MonitorDiskUsagePct) { - server.ClusterGroup.sme.AddState("ERR00079", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00079"], server.URL), ErrFrom: "MON", ServerUrl: server.URL}) + server.ClusterGroup.StateMachine.AddState("ERR00079", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00079"], server.URL), ErrFrom: "MON", ServerUrl: server.URL}) } } } @@ -60,7 +60,7 @@ func (server *ServerMonitor) CheckReplication() string { return "Galera Late" } } - if server.ClusterGroup.sme.IsInFailover() { + if server.ClusterGroup.StateMachine.IsInFailover() { return "In Failover" } if (server.IsDown()) && server.IsSlave == false { @@ -154,7 +154,7 @@ func (server *ServerMonitor) CheckSlaveSettings() { server.ClusterGroup.LogPrintf("DEBUG", "Enforce semisync on slave %s", sl.URL) dbhelper.InstallSemiSync(sl.Conn) } else if sl.IsIgnored() == false && sl.HaveSemiSync == false && server.ClusterGroup.GetTopology() != topoMultiMasterWsrep { - server.ClusterGroup.sme.AddState("WARN0048", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0048"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL}) + server.ClusterGroup.StateMachine.AddState("WARN0048", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0048"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL}) } if server.ClusterGroup.Conf.ForceBinlogRow && sl.HaveBinlogRow == false { @@ -163,7 +163,7 @@ func (server *ServerMonitor) CheckSlaveSettings() { server.ClusterGroup.LogPrintf("INFO", "Enforce binlog format ROW on slave %s", sl.URL) } else if sl.IsIgnored() == false && sl.HaveBinlogRow == false && (server.ClusterGroup.Conf.AutorejoinFlashback == true || server.ClusterGroup.GetTopology() == topoMultiMasterWsrep) { //galera or binlog flashback need row based binlog - server.ClusterGroup.sme.AddState("WARN0049", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0049"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL}) + server.ClusterGroup.StateMachine.AddState("WARN0049", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0049"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL}) } if server.ClusterGroup.Conf.ForceSlaveReadOnly && sl.ReadOnly == "OFF" && !server.ClusterGroup.IsInIgnoredReadonly(server) && !server.ClusterGroup.IsMultiMaster() { // In non-multimaster mode, enforce read-only flag if the option is set @@ -174,54 +174,54 @@ func (server 
*ServerMonitor) CheckSlaveSettings() {
 		dbhelper.SetSlaveHeartbeat(sl.Conn, "1", server.ClusterGroup.Conf.MasterConn, server.DBVersion)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce heartbeat to 1s on slave %s", sl.URL)
 	} else if sl.IsIgnored() == false && sl.GetReplicationHearbeatPeriod() > 1 {
-		server.ClusterGroup.sme.AddState("WARN0050", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0050"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0050", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0050"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if server.ClusterGroup.Conf.ForceSlaveGtid && sl.GetReplicationUsingGtid() == "No" {
 		dbhelper.SetSlaveGTIDMode(sl.Conn, "slave_pos", server.ClusterGroup.Conf.MasterConn, server.DBVersion)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce GTID replication on slave %s", sl.URL)
 	} else if sl.IsIgnored() == false && sl.GetReplicationUsingGtid() == "No" && server.ClusterGroup.GetTopology() != topoMultiMasterWsrep && server.IsMariaDB() {
-		server.ClusterGroup.sme.AddState("WARN0051", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0051"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0051", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0051"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if server.ClusterGroup.Conf.ForceSlaveGtidStrict && sl.IsReplicationUsingGtidStrict() == false && server.ClusterGroup.GetTopology() != topoMultiMasterWsrep && server.IsMariaDB() {
 		dbhelper.SetSlaveGTIDModeStrict(sl.Conn, server.DBVersion)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce GTID strict mode on slave %s", sl.URL)
 	} else if sl.IsIgnored() == false && sl.IsReplicationUsingGtidStrict() == false && server.ClusterGroup.GetTopology() != topoMultiMasterWsrep && server.IsMariaDB() {
-		server.ClusterGroup.sme.AddState("WARN0058", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0058"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0058", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0058"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if server.ClusterGroup.Conf.ForceSyncInnoDB && sl.HaveInnodbTrxCommit == false {
 		dbhelper.SetSyncInnodb(sl.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce InnoDB durability on slave %s", sl.URL)
 	} else if sl.IsIgnored() == false && sl.HaveInnodbTrxCommit == false {
-		server.ClusterGroup.sme.AddState("WARN0052", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0052"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0052", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0052"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if server.ClusterGroup.Conf.ForceBinlogChecksum && sl.HaveChecksum == false {
 		dbhelper.SetBinlogChecksum(sl.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce checksum on slave %s", sl.URL)
 	} else if sl.IsIgnored() == false && sl.HaveChecksum == false {
-		server.ClusterGroup.sme.AddState("WARN0053", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0053"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0053", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0053"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if server.ClusterGroup.Conf.ForceBinlogSlowqueries && sl.HaveBinlogSlowqueries == false {
 		dbhelper.SetBinlogSlowqueries(sl.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce log slow queries of replication on slave %s", sl.URL)
 	} else if sl.IsIgnored() == false && sl.HaveBinlogSlowqueries == false {
-		server.ClusterGroup.sme.AddState("WARN0054", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0054"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0054", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0054"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if server.ClusterGroup.Conf.ForceBinlogAnnotate && sl.HaveBinlogAnnotate == false && server.IsMariaDB() {
 		dbhelper.SetBinlogAnnotate(sl.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce annotate on slave %s", sl.URL)
 	} else if sl.IsIgnored() == false && sl.HaveBinlogAnnotate == false && server.IsMariaDB() {
-		server.ClusterGroup.sme.AddState("WARN0055", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0055"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0055", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0055"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if server.ClusterGroup.Conf.ForceBinlogCompress && sl.HaveBinlogCompress == false && sl.DBVersion.IsMariaDB() && sl.DBVersion.Major >= 10 && sl.DBVersion.Minor >= 2 {
 		dbhelper.SetBinlogCompress(sl.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce binlog compression on slave %s", sl.URL)
 	} else if sl.IsIgnored() == false && sl.HaveBinlogCompress == false && sl.DBVersion.IsMariaDB() && sl.DBVersion.Major >= 10 && sl.DBVersion.Minor >= 2 {
-		server.ClusterGroup.sme.AddState("WARN0056", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0056"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0056", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0056"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if sl.IsIgnored() == false && sl.HaveBinlogSlaveUpdates == false {
-		server.ClusterGroup.sme.AddState("WARN0057", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0057"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0057", state.State{ErrType: LvlWarn, ErrDesc: fmt.Sprintf(clusterError["WARN0057"], sl.URL), ErrFrom: "TOPO", ServerUrl: sl.URL})
 	}
 	if server.IsAcid() == false && server.ClusterGroup.IsDiscovered() {
@@ -236,49 +236,49 @@ func (server *ServerMonitor) CheckMasterSettings() {
 		server.ClusterGroup.LogPrintf("INFO", "Enforce semisync on Master %s", server.URL)
 		dbhelper.InstallSemiSync(server.Conn)
 	} else if server.HaveSemiSync == false && server.ClusterGroup.GetTopology() != topoMultiMasterWsrep && server.ClusterGroup.GetTopology() != topoMultiMasterGrouprep {
-		server.ClusterGroup.sme.AddState("WARN0060", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0060"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0060", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0060"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.ClusterGroup.Conf.ForceBinlogRow && server.HaveBinlogRow == false {
 		dbhelper.SetBinlogFormat(server.Conn, "ROW")
 		server.ClusterGroup.LogPrintf("INFO", "Enforce binlog format ROW on Master %s", server.URL)
 	} else if server.HaveBinlogRow == false && server.ClusterGroup.Conf.AutorejoinFlashback == true {
-		server.ClusterGroup.sme.AddState("WARN0061", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0061"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0061", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0061"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.ClusterGroup.Conf.ForceSyncBinlog && server.HaveBinlogSync == false {
 		dbhelper.SetSyncBinlog(server.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce sync binlog on Master %s", server.URL)
 	} else if server.HaveBinlogSync == false {
-		server.ClusterGroup.sme.AddState("WARN0062", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0062"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0062", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0062"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.ClusterGroup.Conf.ForceSyncInnoDB && server.HaveBinlogSync == false {
 		dbhelper.SetSyncInnodb(server.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce innodb durability on Master %s", server.URL)
 	} else if server.HaveBinlogSync == false {
-		server.ClusterGroup.sme.AddState("WARN0064", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0064"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0064", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0064"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.ClusterGroup.Conf.ForceBinlogAnnotate && server.HaveBinlogAnnotate == false && server.IsMariaDB() {
 		dbhelper.SetBinlogAnnotate(server.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce binlog annotate on master %s", server.URL)
 	} else if server.HaveBinlogAnnotate == false && server.IsMariaDB() {
-		server.ClusterGroup.sme.AddState("WARN0067", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0067"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0067", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0067"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.ClusterGroup.Conf.ForceBinlogChecksum && server.HaveChecksum == false {
 		dbhelper.SetBinlogChecksum(server.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce ckecksum annotate on master %s", server.URL)
 	} else if server.HaveChecksum == false {
-		server.ClusterGroup.sme.AddState("WARN0065", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0065"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0065", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0065"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.ClusterGroup.Conf.ForceBinlogCompress && server.HaveBinlogCompress == false && server.IsMariaDB() && server.DBVersion.Major >= 10 && server.DBVersion.Minor >= 2 {
 		dbhelper.SetBinlogCompress(server.Conn)
 		server.ClusterGroup.LogPrintf("INFO", "Enforce binlog compression on master %s", server.URL)
 	} else if server.HaveBinlogCompress == false && server.DBVersion.IsMariaDB() && server.DBVersion.Major >= 10 && server.DBVersion.Minor >= 2 {
-		server.ClusterGroup.sme.AddState("WARN0068", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0068"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0068", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0068"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.HaveBinlogSlaveUpdates == false {
-		server.ClusterGroup.sme.AddState("WARN0069", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0069"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0069", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0069"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.HaveGtidStrictMode == false && server.DBVersion.Flavor == "MariaDB" && server.ClusterGroup.GetTopology() != topoMultiMasterWsrep && server.ClusterGroup.GetTopology() != topoMultiMasterGrouprep {
-		server.ClusterGroup.sme.AddState("WARN0070", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0070"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+		server.ClusterGroup.StateMachine.AddState("WARN0070", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0070"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 	}
 	if server.IsAcid() == false && server.ClusterGroup.IsDiscovered() {
 		server.ClusterGroup.SetState("WARN0007", state.State{ErrType: "WARNING", ErrDesc: "At least one server is not ACID-compliant. Please make sure that sync_binlog and innodb_flush_log_at_trx_commit are set to 1", ErrFrom: "CONF", ServerUrl: server.URL})
@@ -292,7 +292,7 @@ func (server *ServerMonitor) CheckSlaveSameMasterGrants() bool {
 	}
 	for _, user := range server.ClusterGroup.GetMaster().Users {
 		if _, ok := server.Users["'"+user.User+"'@'"+user.Host+"'"]; !ok {
-			server.ClusterGroup.sme.AddState("ERR00056", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00056"], fmt.Sprintf("'%s'@'%s'", user.User, user.Host), server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
+			server.ClusterGroup.StateMachine.AddState("ERR00056", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00056"], fmt.Sprintf("'%s'@'%s'", user.User, user.Host), server.URL), ErrFrom: "TOPO", ServerUrl: server.URL})
 			return false
 		}
 	}
diff --git a/cluster/srv_job.go b/cluster/srv_job.go
index 3df0d7712..c8ad3c7f5 100644
--- a/cluster/srv_job.go
+++ b/cluster/srv_job.go
@@ -526,7 +526,7 @@ func (server *ServerMonitor) JobsCheckRunning() error {
 		rows.Scan(&task.task, &task.ct)
 		if task.ct > 0 {
 			if task.ct > 10 {
-				server.ClusterGroup.sme.AddState("ERR00060", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["ERR00060"], server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+				server.ClusterGroup.StateMachine.AddState("ERR00060", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["ERR00060"], server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				purge := "DELETE from replication_manager_schema.jobs WHERE task='" + task.task + "' AND done=0 AND result IS NULL order by start asc limit " + strconv.Itoa(task.ct-1)
 				err := server.ExecQueryNoBinLog(purge)
 				if err != nil {
@@ -534,31 +534,31 @@
 				}
 			} else {
 				if task.task == "optimized" {
-					server.ClusterGroup.sme.AddState("WARN0072", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0072"], server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0072", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0072"], server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "restart" {
-					server.ClusterGroup.sme.AddState("WARN0096", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0096"], server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0096", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0096"], server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "stop" {
-					server.ClusterGroup.sme.AddState("WARN0097", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0097"], server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0097", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0097"], server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "xtrabackup" {
-					server.ClusterGroup.sme.AddState("WARN0073", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0073"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0073", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0073"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "mariabackup" {
-					server.ClusterGroup.sme.AddState("WARN0073", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0073"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0073", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0073"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "reseedxtrabackup" {
-					server.ClusterGroup.sme.AddState("WARN0074", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0074"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0074", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0074"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "reseedmariabackup" {
-					server.ClusterGroup.sme.AddState("WARN0074", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0074"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0074", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0074"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "reseedmysqldump" {
-					server.ClusterGroup.sme.AddState("WARN0075", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0075"], server.ClusterGroup.Conf.BackupLogicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0075", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0075"], server.ClusterGroup.Conf.BackupLogicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "reseedmydumper" {
-					server.ClusterGroup.sme.AddState("WARN0075", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0075"], server.ClusterGroup.Conf.BackupLogicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0075", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0075"], server.ClusterGroup.Conf.BackupLogicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "flashbackxtrabackup" {
-					server.ClusterGroup.sme.AddState("WARN0076", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0076"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0076", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0076"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "flashbackmariabackup" {
-					server.ClusterGroup.sme.AddState("WARN0076", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0076"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0076", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0076"], server.ClusterGroup.Conf.BackupPhysicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "flashbackmydumper" {
-					server.ClusterGroup.sme.AddState("WARN0077", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0077"], server.ClusterGroup.Conf.BackupLogicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0077", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0077"], server.ClusterGroup.Conf.BackupLogicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				} else if task.task == "flashbackmysqldump" {
-					server.ClusterGroup.sme.AddState("WARN0077", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0077"], server.ClusterGroup.Conf.BackupLogicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
+					server.ClusterGroup.StateMachine.AddState("WARN0077", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(server.ClusterGroup.GetErrorList()["WARN0077"], server.ClusterGroup.Conf.BackupLogicalType, server.URL), ErrFrom: "JOB", ServerUrl: server.URL})
 				}
 			}
diff --git a/cluster/srv_rejoin.go b/cluster/srv_rejoin.go
index 096dffd99..a9ed221ee 100644
--- a/cluster/srv_rejoin.go
+++ b/cluster/srv_rejoin.go
@@ -49,7 +49,7 @@ func (server *ServerMonitor) RejoinMaster() error {
 		return nil
 	}
-	if server.ClusterGroup.sme.IsInFailover() {
+	if server.ClusterGroup.StateMachine.IsInFailover() {
 		return nil
 	}
 	if server.ClusterGroup.Conf.LogLevel > 2 {
@@ -570,7 +570,7 @@ func (server *ServerMonitor) rejoinSlave(ss dbhelper.SlaveStatus) error {
 		} else {
 			//Adding state waiting for old master to rejoin in positional mode
 			// this state prevent crash info to be removed
-			server.ClusterGroup.sme.AddState("ERR00049", state.State{ErrType: "ERRRO", ErrDesc: fmt.Sprintf(clusterError["ERR00049"]), ErrFrom: "TOPO"})
+			server.ClusterGroup.StateMachine.AddState("ERR00049", state.State{ErrType: "ERRRO", ErrDesc: fmt.Sprintf(clusterError["ERR00049"]), ErrFrom: "TOPO"})
 		}
 	}
 }
@@ -704,10 +704,10 @@ func (cluster *Cluster) RejoinFixRelay(slave *ServerMonitor, relay *ServerMonito
 	if cluster.GetTopology() == topoMultiMasterRing || cluster.GetTopology() == topoMultiMasterWsrep {
 		return nil
 	}
-	cluster.sme.AddState("ERR00045", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00045"]), ErrFrom: "TOPO"})
+	cluster.StateMachine.AddState("ERR00045", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00045"]), ErrFrom: "TOPO"})
 	if slave.GetReplicationDelay() > cluster.Conf.FailMaxDelay {
-		cluster.sme.AddState("ERR00046", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00046"]), ErrFrom: "TOPO"})
+		cluster.StateMachine.AddState("ERR00046", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00046"]), ErrFrom: "TOPO"})
 		return nil
 	} else {
 		ss, err := slave.GetSlaveStatus(slave.ReplicationSourceName)
diff --git a/config/config.go b/config/config.go
index 2660fb5a9..0da6e5e9e 100644
--- a/config/config.go
+++ b/config/config.go
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/signal18/replication-manager/share"
+	"github.com/spf13/viper"
 )
 
 type Config struct {
@@ -1077,3 +1078,73 @@ func (conf Config) PrintConf() {
 	}
 
 }
+
+func (conf Config) MergeConfig(path string, name string, ImmMap map[string]interface{}, DefMap map[string]interface{}, confPath string) error {
+	dynRead := viper.GetViper()
+	dynRead.SetConfigType("toml")
+
+	dynMap := make(map[string]interface{})
+
+	if _, err := os.Stat(path + "/" + name + "/overwrite.toml"); os.IsNotExist(err) {
+		fmt.Printf("No monitoring saved config found " + path + "/" + name + "/overwrite.toml")
+		return err
+	} else {
+		fmt.Printf("Parsing saved config from working directory %s ", path+"/"+name+"/overwrite.toml")
+		if _, err := os.Stat(path + "/" + name + "/overwrite.toml"); !os.IsNotExist(err) {
+			dynRead.SetConfigFile(path + "/" + name + "/overwrite.toml")
+		}
+		//dynRead = dynRead.Sub("overwrite-" + name)
+		for _, f := range dynRead.AllKeys() {
+			v := dynRead.Get(f)
+			_, ok := ImmMap[f]
+			if ok && v != nil && v != ImmMap[f] {
+				fmt.Printf("viper value : %s = %s\n", f, v)
+
+				fmt.Printf("Imm value : %s = %s\n", f, ImmMap[f])
+				_, ok := DefMap[f]
+				if ok && v != DefMap[f] {
+					dynMap[f] = dynRead.Get(f)
+					fmt.Printf("default value : %s = %s\n", f, DefMap[f])
+				}
+				if !ok {
+					dynMap[f] = dynRead.Get(f)
+				}
+
+			}
+
+		}
+
+		err = dynRead.MergeInConfig()
+		if err != nil {
+			fmt.Printf("Config error in " + path + "/" + name + "/overwrite.toml" + ":" + err.Error())
+			return err
+		}
+	}
+
+	//dynRead.Unmarshal(&conf)
+	//conf.PrintConf()
+	fmt.Printf("%v", DefMap)
+	conf.WriteMergeConfig(confPath, dynMap)
+	return nil
+}
+
+func (conf Config) WriteMergeConfig(confPath string, dynMap map[string]interface{}) error {
+	input, err := ioutil.ReadFile("myfile")
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	lines := strings.Split(string(input), "\n")
+
+	for i, line := range lines {
+		if strings.Contains(line, "]") {
+			lines[i] = "LOL"
+		}
+	}
+	output := strings.Join(lines, "\n")
+	err = ioutil.WriteFile("myfile", []byte(output), 0644)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	return nil
+}
diff --git a/server/server.go b/server/server.go
index 4cfb228c6..ac43f8fc9 100644
--- a/server/server.go
+++ b/server/server.go
@@ -384,6 +384,10 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) {
 		conf.WorkingDir = fistRead.GetString("default.monitoring-datadir")
 	}
 
+	if fistRead.GetString("default.git-url") != "" && fistRead.GetString("default.git-acces-token") != "" {
+		cluster.CloneConfigFromGit(fistRead.GetString("default.git-url"), fistRead.GetString("default.git-acces-token"), conf.WorkingDir)
+	}
+
 	dynRead := viper.GetViper()
 	dynRead.SetConfigType("toml")
 
@@ -397,25 +401,39 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) {
 	for _, f := range files {
 		if f.IsDir() && f.Name() != "graphite" {
 			//load config file from git hub
-			if fistRead.GetString("default.git-url") != "" && fistRead.GetString("default.git-acces-token") != "" {
-				cluster.CloneConfigFromGit(fistRead.GetString("default.git-url"), fistRead.GetString("default.git-acces-token"), conf.WorkingDir, f.Name())
-			}
-			//fistRead.SetConfigName(f.Name())
-			dynRead.SetConfigName(f.Name())
-			if _, err := os.Stat(conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml"); os.IsNotExist(err) {
-				log.Warning("No monitoring saved config found " + conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml")
+			fistRead.SetConfigName(f.Name())
+			dynRead.SetConfigName("overwrite-" + f.Name())
+			if _, err := os.Stat(conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml"); os.IsNotExist(err) || f.Name() == "overwrite" {
+				if f.Name() != "overwrite" {
+					log.Warning("No monitoring saved config found " + conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml")
+				}
+
 			} else {
 				log.Infof("Parsing saved config from working directory %s ", conf.WorkingDir+"/"+f.Name()+"/"+f.Name()+".toml")
 				fistRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml")
-				dynRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml")
+
 				err := fistRead.MergeInConfig()
-				err = dynRead.MergeInConfig()
 				if err != nil {
 					log.Fatal("Config error in " + conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml" + ":" + err.Error())
 				}
 			}
+			/*
+				if _, err := os.Stat(conf.WorkingDir + "/" + f.Name() + "/overwrite.toml"); os.IsNotExist(err) {
+					log.Warning("No monitoring saved config found " + conf.WorkingDir + "/" + f.Name() + "/overwrite.toml")
+				} else {
+					log.Infof("Parsing saved config from working directory %s ", conf.WorkingDir+"/"+f.Name()+"/"+f.Name()+".toml")
+					if _, err := os.Stat(conf.WorkingDir + "/" + f.Name() + "/" + f.Name() + ".toml"); !os.IsNotExist(err) {
+						dynRead.SetConfigFile(conf.WorkingDir + "/" + f.Name() + "/overwrite.toml")
+					}
+
+					err = dynRead.MergeInConfig()
+					if err != nil {
+						log.Fatal("Config error in " + conf.WorkingDir + "/" + f.Name() + "/overwrite.toml" + ":" + err.Error())
+					}
+				}*/
+
 		}
 	}
 	//fmt.Printf("%+v\n", dynRead.AllSettings())
@@ -429,8 +447,6 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) {
 	var strClusters string
 	strClusters = cfgGroup
 
-	//fmt.Printf("%+v\n", fistRead.AllSettings())
-
 	//if cluster name is empty, go discover cluster
 	if strClusters == "" {
 		// Discovering the clusters from all merged conf files build clusterDiscovery map
diff --git a/server/server_cmd.go b/server/server_cmd.go
index 9ddc5bfa1..41f1b186e 100644
--- a/server/server_cmd.go
+++ b/server/server_cmd.go
@@ -73,17 +73,7 @@ var versionCmd = &cobra.Command{
 	},
 }
 
-var configMergeCmd = &cobra.Command{
-	Use:   "config-merge",
-	Short: "Merges the initial configuration file with the dynamic one",
-	Long:  `Merges all parameters modified in dynamic mode with the original parameters (including immutable parameters) by merging the config files generated by the dynamic mode. Be careful, this command overwrites the original config file.`,
-	Run: func(cmd *cobra.Command, args []string) {
-		fmt.Println("Start config merge command !!\n")
-		fmt.Println("Cluster: ", cfgGroup)
-		fmt.Println("Config : ", conf.ConfigFile)
-
-	},
-}
+var overwriteConf config.Config
 
 func init() {
 
@@ -99,11 +89,12 @@ func init() {
 	rootCmd.PersistentFlags().BoolVar(&conf.Verbose, "verbose", false, "Print detailed execution info")
 	rootCmd.PersistentFlags().StringVar(&memprofile, "memprofile", "", "Write a memory profile to this file readable by pprof")
 
-	configMergeCmd.PersistentFlags().StringVar(&cfgGroup, "cluster", "", "Configuration group (default is none)")
-	configMergeCmd.PersistentFlags().StringVar(&conf.ConfigFile, "config", "", "Configuration file (default is config.toml)")
+	//configMergeCmd.PersistentFlags().StringVar(&cfgGroup, "cluster", "", "Cluster name (default is none)")
+	//configMergeCmd.PersistentFlags().StringVar(&conf.ConfigFile, "config", "", "Configuration file (default is config.toml)")
+	rootCmd.PersistentFlags().StringVar(&conf.WorkingDir, "monitoring-datadir", "", "Configuration file (default is config.toml)")
 
 	rootCmd.AddCommand(versionCmd)
-	rootCmd.AddCommand(configMergeCmd)
+	//rootCmd.AddCommand(configMergeCmd)
 }
diff --git a/server/server_monitor.go b/server/server_monitor.go
index 1fb9e3eae..11d3c8ff9 100644
--- a/server/server_monitor.go
+++ b/server/server_monitor.go
@@ -14,6 +14,7 @@ package server
 
 import (
 	"bytes"
+	"fmt"
 	"hash/crc64"
 	"io/ioutil"
 	"runtime"
@@ -39,6 +40,7 @@ func init() {
 	//monitorCmd.AddCommand(rootCmd)
 	rootCmd.AddCommand(monitorCmd)
+	rootCmd.AddCommand(configMergeCmd)
 	if WithDeprecate == "ON" {
 		// initDeprecated() // not needed used alias in main
 	}
@@ -557,8 +559,12 @@ func init() {
 		}
 	}
 
+	//configMergeCmd.PersistentFlags().StringVar(&cfgGroup, "cluster", "", "Cluster name (default is none)")
+	//configMergeCmd.PersistentFlags().StringVar(&conf.ConfigFile, "config", "", "Configuration file (default is config.toml)")
+
 	//cobra.OnInitialize()
 	initLogFlags(monitorCmd)
+	initLogFlags(configMergeCmd)
 
 	//conf des defaults flag sans les paramètres en ligne de commande
 	v := viper.GetViper()
@@ -571,19 +577,7 @@ func init() {
 	}
 
 	viper.BindPFlags(monitorCmd.Flags())
-
-	/*
-		var test config.Config
-
-		values := reflect.ValueOf(test)
-		types := values.Type()
-		for i := 0; i < values.NumField(); i++ {
-			fmt.Printf("HAS CHANGED FROM DEFAULT : %s\n", types.Field(i).Name)
-			if monitorCmd.Flags().Lookup(types.Field(i).Name).Changed {
-
-				fmt.Printf("HAS CHANGED FROM DEFAULT : %s, %s\n", types.Field(i).Name, values.Field(i).String())
-			}
-		}*/
+	viper.BindPFlags(configMergeCmd.Flags())
 
 }
 
@@ -698,6 +692,23 @@ For interacting with this daemon use,
 	},
 }
 
+var configMergeCmd = &cobra.Command{
+	Use:   "config-merge",
+	Short: "Merges the initial configuration file with the dynamic one",
+	Long:  `Merges all parameters modified in dynamic mode with the original parameters (including immutable parameters) by merging the config files generated by the dynamic mode. Be careful, this command overwrites the original config file.`,
+	Run: func(cmd *cobra.Command, args []string) {
+		fmt.Println("Start config merge command !!\n")
+		fmt.Println("Cluster: ", cfgGroup)
+		fmt.Println("Config : ", conf.ConfigFile)
+		RepMan = new(ReplicationManager)
+		RepMan.DefaultFlagMap = defaultFlagMap
+		RepMan.InitConfig(conf)
+		ImmFlagMap := RepMan.ImmuableFlagMaps[cfgGroup]
+		conf.MergeConfig(conf.WorkingDir, cfgGroup, ImmFlagMap, RepMan.DefaultFlagMap, conf.ConfigFile)
+		//RepMan.DynamicFlagMaps
+	},
+}
+
 func GetCommandLineFlag(cmd *cobra.Command) []string {
 	var cmd_flag []string
 	flag := viper.AllKeys()
diff --git a/utils/state/state.go b/utils/state/state.go
index e75547c86..23738d192 100644
--- a/utils/state/state.go
+++ b/utils/state/state.go
@@ -62,16 +62,16 @@ func (m Map) Search(key string) bool {
 }
 
 type StateMachine struct {
-	CurState               *Map
-	OldState               *Map
-	discovered             bool
-	sla                    Sla
-	lastState              int64
-	heartbeats             int64
-	inFailover             bool
-	inSchemaMonitor        bool
-	SchemaMonitorStartTime int64
-	SchemaMonitorEndTime   int64
+	CurState               *Map  `json:"-"`
+	OldState               *Map  `json:"-"`
+	Discovered             bool  `json:"discovered"`
+	sla                    Sla   `json:"-"`
+	lastState              int64 `json:"-"`
+	heartbeats             int64 `json:"-"`
+	InFailover             bool  `json:"inFailover"`
+	InSchemaMonitor        bool  `json:"inSchemaMonitor"`
+	SchemaMonitorStartTime int64 `json:"-"`
+	SchemaMonitorEndTime   int64 `json:"-"`
 	sync.Mutex
 }
 
@@ -123,7 +123,7 @@ func (SM *StateMachine) Init() {
 
 	SM.CurState = NewMap()
 	SM.OldState = NewMap()
-	SM.discovered = false
+	SM.Discovered = false
 	SM.sla.Init()
 	SM.lastState = 0
 	SM.heartbeats = 0
@@ -132,34 +132,34 @@ func (SM *StateMachine) SetMonitorSchemaState() {
 	SM.Lock()
 	SM.SchemaMonitorStartTime = time.Now().Unix()
-	SM.inSchemaMonitor = true
+	SM.InSchemaMonitor = true
 	SM.Unlock()
 }
 
 func (SM *StateMachine) RemoveMonitorSchemaState() {
 	SM.Lock()
-	SM.inSchemaMonitor = false
+	SM.InSchemaMonitor = false
 	SM.SchemaMonitorEndTime = time.Now().Unix()
 	SM.Unlock()
 }
 
 func (SM *StateMachine) SetFailoverState() {
 	SM.Lock()
-	SM.inFailover = true
+	SM.InFailover = true
 	SM.Unlock()
 }
 
 func (SM *StateMachine) RemoveFailoverState() {
 	SM.Lock()
-	SM.inFailover = false
+	SM.InFailover = false
 	SM.Unlock()
 }
 
 func (SM *StateMachine) IsInFailover() bool {
-	return SM.inFailover
+	return SM.InFailover
 }
 
 func (SM *StateMachine) IsInSchemaMonitor() bool {
-	return SM.inSchemaMonitor
+	return SM.InSchemaMonitor
 }
 
 func (SM *StateMachine) AddState(key string, s State) {
@@ -220,7 +220,7 @@ func (SM *StateMachine) IsFailable() bool {
 			return false
 		}
 	}
-	SM.discovered = true
+	SM.Discovered = true
 	SM.Unlock()
 	return true
 
@@ -268,19 +268,19 @@ func (SM *StateMachine) CanMonitor() bool {
 			return false
 		}
 	}
-	SM.discovered = true
+	SM.Discovered = true
 	SM.Unlock()
 	return true
 }
 
 func (SM *StateMachine) UnDiscovered() {
 	SM.Lock()
-	SM.discovered = false
+	SM.Discovered = false
 	SM.Unlock()
 }
 
 func (SM *StateMachine) IsDiscovered() bool {
-	return SM.discovered
+	return SM.Discovered
 }
 
 func (SM *StateMachine) GetStates() []string {
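The utils/state/state.go hunk above exports the former discovered, inFailover and inSchemaMonitor fields and gives them JSON tags. A minimal sketch of the effect, using a hypothetical two-field stand-in rather than the real StateMachine: encoding/json silently skips unexported fields, so only exported, tagged fields appear when the state machine is serialized for the API.

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical stand-ins for illustration only; the real StateMachine also
// carries state maps, SLA counters and a mutex.
type smBefore struct {
	discovered bool // unexported: encoding/json cannot see it
	inFailover bool
}

type smAfter struct {
	Discovered bool `json:"discovered"`
	InFailover bool `json:"inFailover"`
	heartbeats int64 // still unexported, still omitted from JSON
}

func main() {
	before, _ := json.Marshal(smBefore{discovered: true, inFailover: true})
	after, _ := json.Marshal(smAfter{Discovered: true, InFailover: true, heartbeats: 42})
	fmt.Println(string(before)) // {}
	fmt.Println(string(after))  // {"discovered":true,"inFailover":true}
}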
From e2d1754496f538eb7c3770f0f22ab153d5821f93 Mon Sep 17 00:00:00 2001
From: emma
Date: Wed, 3 May 2023 16:47:23 +0200
Subject: [PATCH 21/39] add config-merge command and add a button to delete a cluster (not fully implemented yet)

---
 cluster/cluster_acl.go                      |  5 +++
 cluster/cluster_add.go                      |  3 +-
 cluster/cluster_set.go                      |  4 +-
 cluster/prx_mariadbshardproxy.go            |  2 +-
 cluster/prx_proxysql.go                     |  2 +
 config/config.go                            | 48 ++++++++++++---------
 server/api.go                               |  7 +++
 server/api_cluster.go                       |  5 +++
 server/server.go                            | 14 +++---
 server/server_add.go                        | 14 +-----
 share/dashboard/app/dashboard.js            | 33 +++++++++++++-
 share/dashboard/static/menu-monitor.html    |  7 +++
 share/dashboard/static/monitor-dialogs.html | 17 ++++++++
 13 files changed, 117 insertions(+), 44 deletions(-)

diff --git a/cluster/cluster_acl.go b/cluster/cluster_acl.go
index 993280ea4..9873864bf 100644
--- a/cluster/cluster_acl.go
+++ b/cluster/cluster_acl.go
@@ -598,6 +598,11 @@ func (cluster *Cluster) IsURLPassACL(strUser string, URL string) bool {
 			return true
 		}
 	}
+	if cluster.APIUsers[strUser].Grants[config.GrantClusterDelete] {
+		if strings.Contains(URL, "/api/clusters/actions/delete") {
+			return true
+		}
+	}
 	/*	case cluster.APIUsers[strUser].Grants[config.GrantClusterGrant] == true:
 			return false
 		case cluster.APIUsers[strUser].Grants[config.GrantClusterDropMonitor] == true:
diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go
index 897e8802c..fc4262f28 100644
--- a/cluster/cluster_add.go
+++ b/cluster/cluster_add.go
@@ -96,7 +96,7 @@ func (cluster *Cluster) AddSeededProxy(prx string, srv string, port string, user
 		}
 	case config.ConstProxySqlproxy:
 		cluster.Conf.ProxysqlOn = true
-		cluster.Conf.ProxysqlAdminPort = port
+		cluster.Conf.ProxysqlPort = port
 		if user != "" || password != "" {
 			cluster.Conf.ProxysqlUser = user
 			cluster.Conf.ProxysqlPassword = password
@@ -118,6 +118,7 @@ func (cluster *Cluster) AddSeededProxy(prx string, srv string, port string, user
 			cluster.Conf.MdbsProxyHosts = srv + ":" + port
 		}
 	}
+	cluster.SetClusterProxySqlCredentialsFromConfig()
 	cluster.StateMachine.SetFailoverState()
 	cluster.Lock()
 	cluster.newProxyList()
diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go
index 7b822554c..d42b2167d 100644
--- a/cluster/cluster_set.go
+++ b/cluster/cluster_set.go
@@ -1015,8 +1015,8 @@ func (cluster *Cluster) SetServicePlan(theplan string) error {
 		if err != nil {
 			cluster.LogPrintf(LvlErr, "Fail adding shard proxy monitor on 3306 %s", err)
 		}
-
-		err = cluster.AddSeededProxy(config.ConstProxySqlproxy, "proxysql1", cluster.Conf.ProxysqlPort, "", "")
+		cluster.Conf.ProxysqlUser = "external"
+		err = cluster.AddSeededProxy(config.ConstProxySqlproxy, "proxysql1", cluster.Conf.ProxysqlPort, cluster.Conf.ProxysqlUser, cluster.Conf.ProxysqlPassword)
 		if err != nil {
 			cluster.LogPrintf(LvlErr, "Fail adding proxysql monitor on %s %s", cluster.Conf.ProxysqlPort, err)
 		}
diff --git a/cluster/prx_mariadbshardproxy.go b/cluster/prx_mariadbshardproxy.go
index c73b24ce7..33d55cc25 100644
--- a/cluster/prx_mariadbshardproxy.go
+++ b/cluster/prx_mariadbshardproxy.go
@@ -213,7 +213,7 @@ func (proxy *MariadbShardProxy) Refresh() error {
 	go proxy.ShardProxy.Ping(wg)
 	wg.Wait()
 
-	err := proxy.Refresh()
+	err := proxy.ShardProxy.Refresh()
 	if err != nil {
 		//proxy.ClusterGroup.LogPrintf(LvlErr, "Sharding proxy refresh error (%s)", err)
 		return err
diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go
index a463a5892..e5bf406c1 100644
--- a/cluster/prx_proxysql.go
+++ b/cluster/prx_proxysql.go
@@ -260,7 +260,9 @@ func (proxy *ProxySQLProxy) Failover() {
 }
 
 func (proxy *ProxySQLProxy) Refresh() error {
+
 	cluster := proxy.ClusterGroup
+	cluster.LogPrintf(LvlErr, "ProxySQL port : %s, user %s, pass %s\n", proxy.Port, proxy.User, proxy.Pass)
 	if cluster.Conf.ProxysqlOn == false {
 		return nil
 	}
diff --git a/config/config.go b/config/config.go
index 0da6e5e9e..424a32125 100644
--- a/config/config.go
+++ b/config/config.go
@@ -679,6 +679,7 @@ const (
 	GrantDBConfigGet          string = "db-config-get"
 	GrantDBDebug              string = "db-debug"
 	GrantClusterCreate        string = "cluster-create"
+	GrantClusterDelete        string = "cluster-delete"
 	GrantClusterDrop          string = "cluster-drop"
 	GrantClusterCreateMonitor string = "cluster-create-monitor"
 	GrantClusterDropMonitor   string = "cluster-drop-monitor"
@@ -1081,6 +1082,7 @@ func (conf Config) PrintConf() {
 	}
 
 }
 
 func (conf Config) MergeConfig(path string, name string, ImmMap map[string]interface{}, DefMap map[string]interface{}, confPath string) error {
 	dynRead := viper.GetViper()
+	viper.SetConfigName("overwrite")
 	dynRead.SetConfigType("toml")
 
 	dynMap := make(map[string]interface{})
@@ -1090,21 +1092,21 @@ func (conf Config) MergeConfig(path string, name string, ImmMap map[string]inter
 		return err
 	} else {
 		fmt.Printf("Parsing saved config from working directory %s ", path+"/"+name+"/overwrite.toml")
-		if _, err := os.Stat(path + "/" + name + "/overwrite.toml"); !os.IsNotExist(err) {
-			dynRead.SetConfigFile(path + "/" + name + "/overwrite.toml")
+
+		dynRead.AddConfigPath(path + "/" + name)
+		err := dynRead.ReadInConfig()
+		if err != nil {
+			fmt.Printf("Could not read in config : " + path + "/" + name + "/overwrite.toml")
 		}
-		//dynRead = dynRead.Sub("overwrite-" + name)
+		dynRead = dynRead.Sub("overwrite-" + name)
+		fmt.Printf("%v\n", dynRead.AllSettings())
 		for _, f := range dynRead.AllKeys() {
 			v := dynRead.Get(f)
 			_, ok := ImmMap[f]
 			if ok && v != nil && v != ImmMap[f] {
-				fmt.Printf("viper value : %s = %s\n", f, v)
-
-				fmt.Printf("Imm value : %s = %s\n", f, ImmMap[f])
 				_, ok := DefMap[f]
 				if ok && v != DefMap[f] {
 					dynMap[f] = dynRead.Get(f)
-					fmt.Printf("default value : %s = %s\n", f, DefMap[f])
 				}
 				if !ok {
 					dynMap[f] = dynRead.Get(f)
@@ -1113,23 +1115,17 @@ func (conf Config) MergeConfig(path string, name string, ImmMap map[string]inter
 			}
 
 		}
-
-		err = dynRead.MergeInConfig()
-		if err != nil {
-			fmt.Printf("Config error in " + path + "/" + name + "/overwrite.toml" + ":" + err.Error())
-			return err
-		}
 	}
 
-	//dynRead.Unmarshal(&conf)
-	//conf.PrintConf()
-	fmt.Printf("%v", DefMap)
+	//fmt.Printf("%v\n", DefMap)
+	//fmt.Printf("%v\n", dynMap)
+	//fmt.Printf("%v\n", ImmMap)
 	conf.WriteMergeConfig(confPath, dynMap)
 	return nil
 }
 
 func (conf Config) WriteMergeConfig(confPath string, dynMap map[string]interface{}) error {
-	input, err := ioutil.ReadFile("myfile")
+	input, err := ioutil.ReadFile(confPath)
 	if err != nil {
 		log.Fatalln(err)
 	}
@@ -1137,12 +1133,24 @@ func (conf Config) WriteMergeConfig(confPath string, dynMap map[string]interface
 	lines := strings.Split(string(input), "\n")
 
 	for i, line := range lines {
-		if strings.Contains(line, "]") {
-			lines[i] = "LOL"
+		for k, v := range dynMap {
+			tmp := strings.Split(line, "=")
+			tmp[0] = strings.ReplaceAll(tmp[0], " ", "")
+			if tmp[0] == k {
+				//fmt.Printf("Write Merge Conf : line %s, k %s, v %v\n", line, k, v)
+				switch v.(type) {
+				case string:
+					lines[i] = k + " = " + fmt.Sprintf("\"%v\"", v)
+				default:
+					lines[i] = k + " = " + fmt.Sprintf("%v", v)
+				}
+
+			}
 		}
+
 	}
 	output := strings.Join(lines, "\n")
-	err = ioutil.WriteFile("myfile", []byte(output), 0644)
+	err = ioutil.WriteFile(confPath, []byte(output), 0644)
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/server/api.go b/server/api.go
index 29a26e212..5ac42508c 100644
--- a/server/api.go
+++ b/server/api.go
@@ -437,6 +437,13 @@ func (repman *ReplicationManager) handlerMuxClusterAdd(w http.ResponseWriter, r
 
 }
 
+func (repman *ReplicationManager) handlerMuxClusterDelete(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	vars := mux.Vars(r)
+	repman.DeleteCluster(vars["clusterName"])
+
+}
+
 // swagger:operation GET /api/prometheus prometheus
 // Returns the Prometheus metrics for all database instances on the server
 // in the Prometheus text format
diff --git a/server/api_cluster.go b/server/api_cluster.go
index fd16d29c5..7035c0c5b 100644
--- a/server/api_cluster.go
+++ b/server/api_cluster.go
@@ -240,6 +240,11 @@ func (repman *ReplicationManager) apiClusterProtectedHandler(router *mux.Router)
 		negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterAdd)),
 	))
 
+	router.Handle("/api/clusters/actions/delete/{clusterName}", negroni.New(
+		negroni.HandlerFunc(repman.validateTokenMiddleware),
+		negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterDelete)),
+	))
+
 	router.Handle("/api/clusters/{clusterName}/topology/servers", negroni.New(
 		negroni.HandlerFunc(repman.validateTokenMiddleware),
 		negroni.Wrap(http.HandlerFunc(repman.handlerMuxServers)),
 	))
diff --git a/server/server.go b/server/server.go
index ac43f8fc9..9b6770b4a 100644
--- a/server/server.go
+++ b/server/server.go
@@ -321,16 +321,18 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) {
 	//secRead.UnmarshalKey("default", &test)
 	//fmt.Printf("REPMAN DEFAULT SECTION : %s", secRead.AllSettings())
 
+	if secRead != nil {
+		for _, f := range secRead.AllKeys() {
+			v := secRead.Get(f)
+			if v != nil {
+				ImmuableMap[f] = secRead.Get(f)
+			}
 
-	//Add immuatable flag from default section
-	for _, f := range secRead.AllKeys() {
-		v := secRead.Get(f)
-		if v != nil {
-			ImmuableMap[f] = secRead.Get(f)
 		}
-
 	}
+
+	//Add immuatable flag from default section
+
 	//test.PrintConf()
 	//from here first read as the combination of default sections variables but not forced parameters
diff --git a/server/server_add.go b/server/server_add.go
index f3829eb8b..e812163f8 100644
--- a/server/server_add.go
+++ b/server/server_add.go
@@ -12,7 +12,6 @@ import (
 
 func (repman *ReplicationManager) AddCluster(clusterName string, clusterHead string) error {
 	var myconf = make(map[string]config.Config)
-
 	myconf[clusterName] = repman.Conf
 
 	repman.Lock()
 	repman.ClusterList = append(repman.ClusterList, clusterName)
@@ -26,18 +25,7 @@ func (repman *ReplicationManager) AddCluster(clusterName string, clusterHead str
 	repman.DynamicFlagMaps[clusterName] = repman.DynamicFlagMaps["default"]
 	repman.Unlock()
 
-	/*file, err := os.OpenFile(repman.Conf.ClusterConfigPath+"/"+clusterName+".toml", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
-	if err != nil {
-		if os.IsPermission(err) {
-			log.Errorf("Read file permission denied: %s", repman.Conf.ClusterConfigPath+"/"+clusterName+".toml")
-		}
-		return err
-	}
-	defer file.Close()
-	err = toml.NewEncoder(file).Encode(myconf)
-	if err != nil {
-		return err
-	}*/
+	//confs[clusterName] = repman.GetClusterConfig(fistRead, repman.ImmuableFlagMaps["default"], repman.DynamicFlagMaps["default"], clusterName, conf)
 
 	cluster, _ := repman.StartCluster(clusterName)
diff --git a/share/dashboard/app/dashboard.js b/share/dashboard/app/dashboard.js
index 036c7e46e..90646b194 100644
--- a/share/dashboard/app/dashboard.js
+++ b/share/dashboard/app/dashboard.js
@@ -932,7 +932,10 @@ function (
         console.log("Error in set orchetrator.");
       });
     };
-
+    var deleteCluster = function (cluster) {
+      console.log("cluster "+ cluster + " deleted.." );
+      $http.get('/api/clusters/actions/delete/' +cluster)
+    };
     var createClusterSetPlan = function (cluster,plan) {
      console.log('Setting plan..' + plan);
      httpGetWithoutResponse('/api/clusters/'+ cluster + '/settings/actions/set/prov-service-plan/'+plan);
@@ -1593,6 +1596,34 @@ function (
      $mdSidenav('right').close();
      $scope.menuOpened = false;
    };
+    $scope.closeDeleteClusterDialog = function () {
+      $mdDialog.hide({contentElement: '#myDeleteClusterDialog',});
+      if (confirm("Confirm Deleting Cluster : " + $scope.selectedClusterName )) {
+        deleteCluster($scope.selectedClusterName);
+
+        $scope.selectedClusterName = $scope.dlgAddClusterName;
+        $scope.servers={};
+        $scope.slaves={};
+        $scope.master={};
+        $scope.alerts={};
+        $scope.logs={};
+        $scope.proxies={};
+        // $scope.callServices();
+        // $scope.setClusterCredentialDialog();
+      }
+      $mdSidenav('right').close();
+      $scope.menuOpened = false;
+    };
+    $scope.deleteClusterDialog = function () {
+      $scope.menuOpened = true;
+      $mdDialog.show({
+        contentElement: '#myDeleteClusterDialog',
+        preserveScope: true,
+        parent: angular.element(document.body),
+        // clickOutsideToClose: false,
+        // escapeToClose: false,
+      });
+    };
     $scope.cancelNewClusterDialog = function () {
      $mdDialog.hide({contentElement: '#myNewClusterDialog',});
      $mdSidenav('right').close();
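The dashboard change above triggers the new delete route with a plain $http.get. The same endpoint can be exercised from any HTTP client; the sketch below is illustrative only (the base URL, port and bearer-token handling are assumptions, not values defined by this patch) and targets the route that apiClusterProtectedHandler registers behind validateTokenMiddleware.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// deleteCluster calls the delete-cluster action route; address and token are hypothetical.
func deleteCluster(baseURL, token, clusterName string) error {
	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/clusters/actions/delete/"+clusterName, nil)
	if err != nil {
		return err
	}
	// The route is token-protected; a standard bearer header is assumed here.
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
	return nil
}

func main() {
	// Hypothetical address and token, for illustration only.
	_ = deleteCluster("http://127.0.0.1:10005", "replace-with-api-token", "cluster1")
}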
diff --git a/share/dashboard/static/menu-monitor.html b/share/dashboard/static/menu-monitor.html
index 90d21eb40..cf686727d 100644
--- a/share/dashboard/static/menu-monitor.html
+++ b/share/dashboard/static/menu-monitor.html
@@ -23,6 +23,13 @@
 [markup lost in extraction: the added lines create a "Delete Shard Cluster" menu entry that opens the delete-cluster dialog]
diff --git a/share/dashboard/static/monitor-dialogs.html b/share/dashboard/static/monitor-dialogs.html
index 5609f6dcd..a8edddc5f 100644
--- a/share/dashboard/static/monitor-dialogs.html
+++ b/share/dashboard/static/monitor-dialogs.html
@@ -15,6 +15,23 @@
 [markup lost in extraction: the added lines define the "Choose a Cluster" confirmation dialog (#myDeleteClusterDialog) used when deleting a cluster]
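The reworked WriteMergeConfig in this patch rewrites matching "key = value" lines in place instead of re-encoding the whole TOML file, which keeps comments and section layout intact. Below is a standalone sketch of that line-rewrite technique under simplifying assumptions (hypothetical file name and override keys; only plain "key = value" lines are handled, as in the patch).

package main

import (
	"fmt"
	"os"
	"strings"
)

// rewriteKeys replaces the value of any "key = value" line whose key appears
// in overrides, leaving every other line (sections, comments) untouched.
func rewriteKeys(path string, overrides map[string]interface{}) error {
	input, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	lines := strings.Split(string(input), "\n")
	for i, line := range lines {
		// Compare only the key part, ignoring surrounding whitespace.
		key := strings.TrimSpace(strings.SplitN(line, "=", 2)[0])
		if v, ok := overrides[key]; ok {
			switch v.(type) {
			case string:
				lines[i] = fmt.Sprintf("%s = %q", key, v)
			default:
				lines[i] = fmt.Sprintf("%s = %v", key, v)
			}
		}
	}
	return os.WriteFile(path, []byte(strings.Join(lines, "\n")), 0644)
}

func main() {
	// Hypothetical config file and override keys, for illustration only.
	if err := rewriteKeys("config.toml", map[string]interface{}{
		"failover-mode":  "automatic",
		"failover-limit": 3,
	}); err != nil {
		fmt.Println(err)
	}
}

Rewriting individual lines rather than round-tripping through a TOML encoder keeps untouched settings byte-for-byte identical, at the cost of not handling multi-line or repeated keys.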