diff --git a/cmd/collectors/collectorstest.go b/cmd/collectors/collectorstest.go index ad432c0db..9e9b073aa 100644 --- a/cmd/collectors/collectorstest.go +++ b/cmd/collectors/collectorstest.go @@ -50,13 +50,11 @@ func JSONToGson(path string, flatten bool) []gjson.Result { return nil } bb := b.Bytes() - output := gjson.GetManyBytes(bb, "records", "num_records", "_links.next.href") + output := gjson.ParseBytes(bb) + data := output.Get("records") + numRecords := output.Get("num_records") - data := output[0] - numRecords := output[1] - isNonIterRestCall := !data.Exists() - - if isNonIterRestCall { + if !data.Exists() { contentJSON := `{"records":[]}` response, err := sjson.SetRawBytes([]byte(contentJSON), "records.-1", bb) if err != nil { diff --git a/cmd/collectors/restperf/restperf.go b/cmd/collectors/restperf/restperf.go index d65ab905c..e398e818a 100644 --- a/cmd/collectors/restperf/restperf.go +++ b/cmd/collectors/restperf/restperf.go @@ -476,18 +476,12 @@ func parseMetricResponse(instanceData gjson.Result, metric string) *metricRespon for _, name := range t.Array() { if name.String() == metric { metricPath := "counters.#(name=" + metric + ")" - many := gjson.GetMany(instanceDataS, - metricPath+".value", - metricPath+".values", - metricPath+".labels", - metricPath+".counters.#.label", - metricPath+".counters.#.values", - ) - value := many[0] - values := many[1] - labels := many[2] - subLabels := many[3] - subValues := many[4] + many := gjson.Parse(instanceDataS) + value := many.Get(metricPath + ".value") + values := many.Get(metricPath + ".values") + labels := many.Get(metricPath + ".labels") + subLabels := many.Get(metricPath + ".counters.#.label") + subValues := many.Get(metricPath + ".counters.#.values") if value.String() != "" { return &metricResponse{value: strings.Clone(value.String()), label: ""} } diff --git a/cmd/collectors/storagegrid/plugins/joinrest/joinrest.go b/cmd/collectors/storagegrid/plugins/joinrest/joinrest.go index d4f6fa12a..1e8e23967 100644 
--- a/cmd/collectors/storagegrid/plugins/joinrest/joinrest.go +++ b/cmd/collectors/storagegrid/plugins/joinrest/joinrest.go @@ -131,9 +131,9 @@ func (t *JoinRest) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, err } func (t *JoinRest) updateCache(model join, bytes *[]byte) { - results := gjson.GetManyBytes(*bytes, "data.#."+model.JoinRest, "data.#."+model.LabelRest) - keys := results[0].Array() - vals := results[1].Array() + results := gjson.ParseBytes(*bytes) + keys := results.Get("data.#." + model.JoinRest).Array() + vals := results.Get("data.#." + model.LabelRest).Array() if len(keys) != len(vals) { t.Logger.Error(). Str("restKey", model.JoinRest). diff --git a/cmd/collectors/storagegrid/rest/client.go b/cmd/collectors/storagegrid/rest/client.go index d8feb4d08..c2af63765 100644 --- a/cmd/collectors/storagegrid/rest/client.go +++ b/cmd/collectors/storagegrid/rest/client.go @@ -143,8 +143,8 @@ func (c *Client) Fetch(request string, result *[]gjson.Result) error { return fmt.Errorf("error making request %w", err) } - output := gjson.GetManyBytes(fetched, "data") - data = output[0] + output := gjson.ParseBytes(fetched) + data = output.Get("data") for _, r := range data.Array() { *result = append(*result, r.Array()...) } @@ -172,8 +172,8 @@ func (c *Client) GetMetricQuery(metric string, result *[]gjson.Result) error { if err != nil { return err } - output := gjson.GetManyBytes(fetched, "data") - data := output[0] + output := gjson.ParseBytes(fetched) + data := output.Get("data") for _, r := range data.Array() { *result = append(*result, r.Array()...) 
} @@ -280,8 +280,8 @@ func (c *Client) Init(retries int) error { if content, err = c.GetGridRest("grid/config/product-version"); err != nil { continue } - results := gjson.GetManyBytes(content, "data.productVersion") - err = c.SetVersion(results[0].String()) + results := gjson.ParseBytes(content) + err = c.SetVersion(results.Get("data.productVersion").String()) if err != nil { return err } @@ -289,14 +289,15 @@ func (c *Client) Init(retries int) error { if content, err = c.GetGridRest("grid/health/topology?depth=grid"); err != nil { continue } - results = gjson.GetManyBytes(content, "data.name") - c.Cluster.Name = strings.ReplaceAll(results[0].String(), " ", "_") + + results = gjson.ParseBytes(content) + c.Cluster.Name = strings.ReplaceAll(results.Get("data.name").String(), " ", "_") if content, err = c.GetGridRest("grid/license"); err != nil { continue } - results = gjson.GetManyBytes(content, "data.systemId") - c.Cluster.UUID = results[0].String() + results = gjson.ParseBytes(content) + c.Cluster.UUID = results.Get("data.systemId").String() return nil } @@ -377,9 +378,9 @@ func (c *Client) fetchTokenWithAuthRetry() error { return errs.NewStorageGridErr(response.StatusCode, body) } - results := gjson.GetManyBytes(body, "data", "message.text") - token := results[0] - errorMsg := results[1] + results := gjson.ParseBytes(body) + token := results.Get("data") + errorMsg := results.Get("message.text") if token.Exists() { c.token = token.String() diff --git a/cmd/collectors/zapi/plugins/aggregate/aggregate.go b/cmd/collectors/zapi/plugins/aggregate/aggregate.go index 5e50958f7..cc4105957 100644 --- a/cmd/collectors/zapi/plugins/aggregate/aggregate.go +++ b/cmd/collectors/zapi/plugins/aggregate/aggregate.go @@ -52,18 +52,47 @@ func (a *Aggregate) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, er if err := a.getCloudStores(); err != nil { if errors.Is(err, errs.ErrNoInstance) { a.Logger.Debug().Err(err).Msg("Failed to collect cloud store data") - return nil, nil 
} - return nil, err + } + + aggrFootprintMap, err := a.getAggrFootprint() + if err != nil { + a.Logger.Error().Err(err).Msg("Failed to update footprint data") + // clean the map in case of the error + clear(aggrFootprintMap) } // update aggregate instance label with cloud stores info - if len(a.aggrCloudStoresMap) > 0 { - for aggrUUID, aggr := range data.GetInstances() { - if !aggr.IsExportable() { - continue + for aggrUUID, aggr := range data.GetInstances() { + if !aggr.IsExportable() { + continue + } + aggr.SetLabel("cloud_stores", strings.Join(a.aggrCloudStoresMap[aggrUUID], ",")) + + // Handling aggr footprint metrics + aggrName := aggr.GetLabel("aggr") + if af, ok := aggrFootprintMap[aggrName]; ok { + for afKey, afVal := range af { + vfMetric := data.GetMetric(afKey) + if vfMetric == nil { + if vfMetric, err = data.NewMetricFloat64(afKey); err != nil { + a.Logger.Error().Err(err).Str("metric", afKey).Msg("add metric") + continue + } + } + + if afVal != "" { + vfMetricVal, err := strconv.ParseFloat(afVal, 64) + if err != nil { + a.Logger.Error().Err(err).Str(afKey, afVal).Msg("parse") + continue + } + if err = vfMetric.SetValueFloat64(aggr, vfMetricVal); err != nil { + a.Logger.Error().Err(err).Str(afKey, afVal).Msg("set") + continue + } + } } - aggr.SetLabel("cloud_stores", strings.Join(a.aggrCloudStoresMap[aggrUUID], ",")) } } return nil, nil @@ -124,3 +153,44 @@ func (a *Aggregate) getCloudStores() error { } return nil } + +func (a *Aggregate) getAggrFootprint() (map[string]map[string]string, error) { + var ( + result []*node.Node + aggrFootprintMap map[string]map[string]string + err error + ) + + aggrFootprintMap = make(map[string]map[string]string) + request := node.NewXMLS("aggr-space-get-iter") + request.NewChildS("max-records", collectors.DefaultBatchSize) + desired := node.NewXMLS("desired-attributes") + spaceInfo := node.NewXMLS("space-information") + spaceInfo.NewChildS("aggregate", "") + spaceInfo.NewChildS("volume-footprints", "") + 
spaceInfo.NewChildS("volume-footprints-percent", "") + desired.AddChild(spaceInfo) + request.AddChild(desired) + + if result, err = a.client.InvokeZapiCall(request); err != nil { + return nil, err + } + + if len(result) == 0 { + return aggrFootprintMap, nil + } + + for _, footprint := range result { + footprintMetrics := make(map[string]string) + aggr := footprint.GetChildContentS("aggregate") + performanceTierUsed := footprint.GetChildContentS("volume-footprints") + performanceTierUsedPerc := footprint.GetChildContentS("volume-footprints-percent") + if performanceTierUsed != "" || performanceTierUsedPerc != "" { + footprintMetrics["space_performance_tier_used"] = performanceTierUsed + footprintMetrics["space_performance_tier_used_percent"] = performanceTierUsedPerc + aggrFootprintMap[aggr] = footprintMetrics + } + } + + return aggrFootprintMap, nil +} diff --git a/cmd/collectors/zapi/plugins/volume/volume.go b/cmd/collectors/zapi/plugins/volume/volume.go index 66a5e5ec9..ad9cc9b7b 100644 --- a/cmd/collectors/zapi/plugins/volume/volume.go +++ b/cmd/collectors/zapi/plugins/volume/volume.go @@ -92,19 +92,25 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error } volumeCloneMap, err := v.getVolumeCloneInfo() - if err != nil { v.Logger.Error().Err(err).Msg("Failed to update clone data") } + volumeFootprintMap, err := v.getVolumeFootprint() + if err != nil { + v.Logger.Error().Err(err).Msg("Failed to update footprint data") + // clean the map in case of the error + clear(volumeFootprintMap) + } + // update volume instance labels - v.updateVolumeLabels(data, volumeCloneMap) + v.updateVolumeLabels(data, volumeCloneMap, volumeFootprintMap) v.currentVal++ return nil, nil } -func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[string]volumeClone) { +func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[string]volumeClone, volumeFootprintMap map[string]map[string]string) { var err error for _, volume 
:= range data.GetInstances() { if !volume.IsExportable() { @@ -143,6 +149,31 @@ func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[stri continue } } + + // Handling volume footprint metrics + if vf, ok := volumeFootprintMap[key]; ok { + for vfKey, vfVal := range vf { + vfMetric := data.GetMetric(vfKey) + if vfMetric == nil { + if vfMetric, err = data.NewMetricFloat64(vfKey); err != nil { + v.Logger.Error().Err(err).Str("metric", vfKey).Msg("add metric") + continue + } + } + + if vfVal != "" { + vfMetricVal, err := strconv.ParseFloat(vfVal, 64) + if err != nil { + v.Logger.Error().Err(err).Str(vfKey, vfVal).Msg("parse") + continue + } + if err = vfMetric.SetValueFloat64(volume, vfMetricVal); err != nil { + v.Logger.Error().Err(err).Str(vfKey, vfVal).Msg("set") + continue + } + } + } + } } } @@ -186,6 +217,53 @@ func (v *Volume) getVolumeCloneInfo() (map[string]volumeClone, error) { return volumeCloneMap, nil } +func (v *Volume) getVolumeFootprint() (map[string]map[string]string, error) { + var ( + result []*node.Node + volumeFootprintMap map[string]map[string]string + err error + ) + + volumeFootprintMap = make(map[string]map[string]string) + request := node.NewXMLS("volume-footprint-get-iter") + request.NewChildS("max-records", collectors.DefaultBatchSize) + desired := node.NewXMLS("desired-attributes") + footprintInfo := node.NewXMLS("footprint-info") + footprintInfo.NewChildS("volume", "") + footprintInfo.NewChildS("vserver", "") + footprintInfo.NewChildS("volume-blocks-footprint-bin0", "") + footprintInfo.NewChildS("volume-blocks-footprint-bin0-percent", "") + footprintInfo.NewChildS("volume-blocks-footprint-bin1", "") + footprintInfo.NewChildS("volume-blocks-footprint-bin1-percent", "") + desired.AddChild(footprintInfo) + request.AddChild(desired) + + if result, err = v.client.InvokeZapiCall(request); err != nil { + return nil, err + } + + if len(result) == 0 { + return volumeFootprintMap, nil + } + + for _, footprint := range result { 
+ footprintMetrics := make(map[string]string) + volume := footprint.GetChildContentS("volume") + svm := footprint.GetChildContentS("vserver") + performanceTierFootprint := footprint.GetChildContentS("volume-blocks-footprint-bin0") + performanceTierFootprintPerc := footprint.GetChildContentS("volume-blocks-footprint-bin0-percent") + capacityTierFootprint := footprint.GetChildContentS("volume-blocks-footprint-bin1") + capacityTierFootprintPerc := footprint.GetChildContentS("volume-blocks-footprint-bin1-percent") + footprintMetrics["performance_tier_footprint"] = performanceTierFootprint + footprintMetrics["performance_tier_footprint_percent"] = performanceTierFootprintPerc + footprintMetrics["capacity_tier_footprint"] = capacityTierFootprint + footprintMetrics["capacity_tier_footprint_percent"] = capacityTierFootprintPerc + volumeFootprintMap[volume+svm] = footprintMetrics + } + + return volumeFootprintMap, nil +} + func (v *Volume) getEncryptedDisks() ([]string, error) { var ( result []*node.Node diff --git a/cmd/tools/doctor/doctor.go b/cmd/tools/doctor/doctor.go index ec347add7..33c753ddd 100644 --- a/cmd/tools/doctor/doctor.go +++ b/cmd/tools/doctor/doctor.go @@ -96,39 +96,39 @@ func doDoctorCmd(cmd *cobra.Command, _ []string) { } func doDoctor(path string, confPath string) { - contents, err := os.ReadFile(path) - if err != nil { - fmt.Printf("error reading config file. err=%+v\n", err) - return - } if opts.ShouldPrintConfig { + contents, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading config file. 
err=%+v\n", err) + return + } printRedactedConfig(path, contents) } - checkAll(path, contents, confPath) + checkAll(path, confPath) } // checkAll runs all doctor checks // If all checks succeed, print nothing and exit with a return code of 0 // Otherwise, print what failed and exit with a return code of 1 -func checkAll(path string, contents []byte, confPath string) { +func checkAll(path string, confPath string) { // See https://github.com/NetApp/harvest/issues/16 for more checks to add color.DetectConsole(opts.Color) - // Validate that the config file can be parsed - harvestConfig := &conf.HarvestConfig{} - err := yaml.Unmarshal(contents, harvestConfig) + + _, err := conf.LoadHarvestConfig(path) if err != nil { fmt.Printf("error reading config file=[%s] %+v\n", path, err) os.Exit(1) return } + cfg := conf.Config confPaths := filepath.SplitList(confPath) anyFailed := false - anyFailed = !checkUniquePromPorts(*harvestConfig).isValid || anyFailed - anyFailed = !checkPollersExportToUniquePromPorts(*harvestConfig).isValid || anyFailed - anyFailed = !checkExporterTypes(*harvestConfig).isValid || anyFailed + anyFailed = !checkUniquePromPorts(cfg).isValid || anyFailed + anyFailed = !checkPollersExportToUniquePromPorts(cfg).isValid || anyFailed + anyFailed = !checkExporterTypes(cfg).isValid || anyFailed anyFailed = !checkConfTemplates(confPaths).isValid || anyFailed - anyFailed = !checkCollectorName(*harvestConfig).isValid || anyFailed + anyFailed = !checkCollectorName(cfg).isValid || anyFailed if anyFailed { os.Exit(1) diff --git a/cmd/tools/generate/generate.go b/cmd/tools/generate/generate.go index df69152e2..ad4c00448 100644 --- a/cmd/tools/generate/generate.go +++ b/cmd/tools/generate/generate.go @@ -29,7 +29,6 @@ type PollerInfo struct { ContainerName string ShowPorts bool IsFull bool - TemplateDir string CertDir string Mounts []string } @@ -61,11 +60,12 @@ type options struct { filesdPath string showPorts bool outputPath string - templateDir string certDir string 
promPort int grafanaPort int mounts []string + configPath string + confPath string } var opts = &options{ @@ -105,22 +105,28 @@ var metricCmd = &cobra.Command{ } func doDockerFull(cmd *cobra.Command, _ []string) { - var config = cmd.Root().PersistentFlags().Lookup("config") - generateDocker(conf.ConfigPath(config.Value.String()), full) + addRootOptions(cmd) + generateDocker(full) } + func doSystemd(cmd *cobra.Command, _ []string) { - var config = cmd.Root().PersistentFlags().Lookup("config") - generateSystemd(conf.ConfigPath(config.Value.String())) + addRootOptions(cmd) + generateSystemd() } func doDockerCompose(cmd *cobra.Command, _ []string) { - var config = cmd.Root().PersistentFlags().Lookup("config") - generateDocker(conf.ConfigPath(config.Value.String()), harvest) + addRootOptions(cmd) + generateDocker(harvest) } func doGenerateMetrics(cmd *cobra.Command, _ []string) { - var config = cmd.Root().PersistentFlags().Lookup("config") - generateMetrics(conf.ConfigPath(config.Value.String())) + addRootOptions(cmd) + generateMetrics() +} + +func addRootOptions(cmd *cobra.Command) { + opts.configPath = conf.ConfigPath(cmd.Root().PersistentFlags().Lookup("config").Value.String()) + opts.confPath = cmd.Root().PersistentFlags().Lookup("confpath").Value.String() } const ( @@ -134,15 +140,13 @@ func normalizeContainerNames(name string) string { return strings.ToLower(re.ReplaceAllString(name, "-")) } -func generateDocker(path string, kind int) { +func generateDocker(kind int) { var ( - pollerTemplate PollerTemplate - configFilePath string - templateDirPath string - certDirPath string - filesd []string - extraMounts []string - out *os.File + pollerTemplate PollerTemplate + configFilePath string + certDirPath string + filesd []string + out *os.File ) pollerTemplate = PollerTemplate{} @@ -150,19 +154,13 @@ func generateDocker(path string, kind int) { opts.grafanaPort, opts.promPort, } - _, err := conf.LoadHarvestConfig(path) + _, err := conf.LoadHarvestConfig(opts.configPath) 
if err != nil { logErrAndExit(err) } - configFilePath = asComposePath(path) - templateDirPath = asComposePath(opts.templateDir) + configFilePath = asComposePath(opts.configPath) certDirPath = asComposePath(opts.certDir) - extraMounts = make([]string, 0, len(opts.mounts)) - for _, mount := range opts.mounts { - extraMounts = append(extraMounts, asComposePath(mount)) - } - for _, v := range conf.Config.PollersOrdered { port, _ := conf.GetPrometheusExporterPorts(v, true) pollerInfo := PollerInfo{ @@ -175,9 +173,8 @@ func generateDocker(path string, kind int) { ContainerName: normalizeContainerNames("poller_" + v), ShowPorts: opts.showPorts, IsFull: kind == full, - TemplateDir: templateDirPath, CertDir: certDirPath, - Mounts: extraMounts, + Mounts: makeMounts(v), } pollerTemplate.Pollers = append(pollerTemplate.Pollers, pollerInfo) filesd = append(filesd, fmt.Sprintf("- targets: ['%s:%d']", pollerInfo.ServiceName, pollerInfo.Port)) @@ -272,6 +269,40 @@ func generateDocker(path string, kind int) { } } +// makeMounts sets up the mount(s) for the confpath and any CLI-passed mounts +func makeMounts(pollerName string) []string { + var mounts = opts.mounts + + p, err := conf.PollerNamed(pollerName) + if err != nil { + logErrAndExit(err) + } + + confPath := opts.confPath + if confPath == "conf" { + confPath = p.ConfPath + } + + if confPath == "" { + mounts = append(mounts, toMount("./conf")) + } else { + paths := strings.Split(confPath, ":") + for _, path := range paths { + mounts = append(mounts, toMount(path)) + } + } + + return mounts +} + +func toMount(hostPath string) string { + hostPath = asComposePath(hostPath) + if strings.HasPrefix(hostPath, "./") { + return hostPath + ":" + "/opt/harvest/" + hostPath[2:] + } + return hostPath + ":" + hostPath +} + func copyFiles(srcPath, destPath string) error { filesToExclude := map[string]bool{ "harvest.yml": true, @@ -346,9 +377,9 @@ func silentClose(body io.ReadCloser) { _ = body.Close() } -func generateSystemd(path string) { +func 
generateSystemd() { var adminService string - _, err := conf.LoadHarvestConfig(path) + _, err := conf.LoadHarvestConfig(opts.configPath) if err != nil { logErrAndExit(err) } @@ -367,7 +398,7 @@ func generateSystemd(path string) { println("and " + color.Colorize("cp "+harvestAdminService+" /etc/systemd/system/", color.Green)) } println("and then run " + color.Colorize("systemctl daemon-reload", color.Green)) - writeAdminSystemd(path) + writeAdminSystemd(opts.configPath) // reorder list of pollers so that unix collectors are last, see https://github.com/NetApp/harvest/issues/643 pollers := make([]string, 0) unixPollers := make([]string, 0) @@ -422,7 +453,7 @@ func writeAdminSystemd(configFp string) { println(color.Colorize("✓", color.Green) + " HTTP SD file: " + harvestAdminService + " created") } -func generateMetrics(path string) { +func generateMetrics() { var ( poller *conf.Poller err error @@ -430,7 +461,7 @@ func generateMetrics(path string) { zapiClient *zapi.Client ) - _, err = conf.LoadHarvestConfig(path) + _, err = conf.LoadHarvestConfig(opts.configPath) if err != nil { logErrAndExit(err) } @@ -480,7 +511,6 @@ func init() { "logging level (0=trace, 1=debug, 2=info, 3=warning, 4=error, 5=critical)", ) dFlags.StringVar(&opts.image, "image", "ghcr.io/netapp/harvest:latest", "Harvest image. Use rahulguptajss/harvest:latest to pull from Docker Hub") - dFlags.StringVar(&opts.templateDir, "templatedir", "./conf", "Harvest template dir path") dFlags.StringVar(&opts.certDir, "certdir", "./cert", "Harvest certificate dir path") dFlags.StringVarP(&opts.outputPath, "output", "o", "", "Output file path. 
") dFlags.BoolVarP(&opts.showPorts, "port", "p", true, "Expose poller ports to host machine") diff --git a/cmd/tools/generate/generate_test.go b/cmd/tools/generate/generate_test.go new file mode 100644 index 000000000..bde7879b1 --- /dev/null +++ b/cmd/tools/generate/generate_test.go @@ -0,0 +1,22 @@ +package generate + +import "testing" + +func Test_toMount(t *testing.T) { + tests := []struct { + name string + hostPath string + want string + }{ + {name: "dot prefix", hostPath: "./abc/d", want: "./abc/d:/opt/harvest/abc/d"}, + {name: "absolute", hostPath: "/x/y/z", want: "/x/y/z:/x/y/z"}, + {name: "cwd", hostPath: "abc/d", want: "./abc/d:/opt/harvest/abc/d"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := toMount(tt.hostPath); got != tt.want { + t.Errorf("toMount() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/cmd/tools/grafana/dashboard_test.go b/cmd/tools/grafana/dashboard_test.go index d7d151b39..4563702c7 100644 --- a/cmd/tools/grafana/dashboard_test.go +++ b/cmd/tools/grafana/dashboard_test.go @@ -27,7 +27,7 @@ var aggregationPattern = regexp.MustCompile(`\b(sum|count|min|max)\b`) func checkThreshold(t *testing.T, path string, data []byte) { path = shortPath(path) var thresholdMap = map[string][]string{ - // _latency are in microseconds + // _latencies are in microseconds "_latency": { "[\"green\",\"orange\",\"red\"]", "[null,20000,30000]", @@ -37,7 +37,7 @@ func checkThreshold(t *testing.T, path string, data []byte) { "[null,60,80]", }, } - // visit all panel for datasource test + // visit all panels for datasource test visitAllPanels(data, func(p string, key, value gjson.Result) { panelTitle := value.Get("title").String() kind := value.Get("type").String() @@ -62,7 +62,7 @@ func checkThreshold(t *testing.T, path string, data []byte) { "table": {"color-background", "lcd-gauge"}, "stat": {"background"}, } - // check in default also for stat. 
For table we only want relevant column background and override settings + // check in default also for stat. For table, we only want the relevant column background and override settings if kind == "stat" { dS := value.Get("fieldConfig.defaults") tSlice := dS.Get("thresholds") @@ -1080,19 +1080,19 @@ func checkPercentHasMinMax(t *testing.T, path string, data []byte) { if defaultUnit != "percent" && defaultUnit != "percentunit" { return } - min := value.Get("fieldConfig.defaults.min").String() - max := value.Get("fieldConfig.defaults.max").String() - if min != "0" { + theMin := value.Get("fieldConfig.defaults.min").String() + theMax := value.Get("fieldConfig.defaults.max").String() + if theMin != "0" { t.Errorf(`dashboard=%s path=%s panel="%s" has unit=%s, min should be 0 got=%s`, - dashPath, path, value.Get("title").String(), defaultUnit, min) + dashPath, path, value.Get("title").String(), defaultUnit, theMin) } - if defaultUnit == "percent" && max != "100" { + if defaultUnit == "percent" && theMax != "100" { t.Errorf(`dashboard=%s path=%s panel="%s" has unit=%s, max should be 100 got=%s`, - dashPath, path, value.Get("title").String(), defaultUnit, max) + dashPath, path, value.Get("title").String(), defaultUnit, theMax) } - if defaultUnit == "percentunit" && max != "1" { + if defaultUnit == "percentunit" && theMax != "1" { t.Errorf(`dashboard=%s path=%s panel="%s" has unit=%s, max should be 1 got=%s`, - dashPath, path, value.Get("title").String(), defaultUnit, max) + dashPath, path, value.Get("title").String(), defaultUnit, theMax) } }) } @@ -1259,3 +1259,23 @@ func checkDashboardTime(t *testing.T, path string, data []byte) { t.Errorf("dashboard=%s time.to got=%s want=%s", dashPath, to.String(), toWant) } } + +func TestNoDrillDownRows(t *testing.T) { + visitDashboards(dashboards, func(path string, data []byte) { + checkRowNames(t, path, data) + }) +} + +func checkRowNames(t *testing.T, path string, data []byte) { + path = shortPath(path) + visitAllPanels(data, 
func(p string, key, value gjson.Result) { + kind := value.Get("type").String() + if kind == "row" { + title := value.Get("title").String() + if strings.Contains(title, "Drilldown") { + t.Errorf(`dashboard=%s path=panels[%d] title=[%s] got row with Drilldown in title. Remove drilldown`, path, key.Int(), title) + } + } + }) + +} diff --git a/cmd/tools/rest/client.go b/cmd/tools/rest/client.go index 5b8c6ac88..0aaff5ce7 100644 --- a/cmd/tools/rest/client.go +++ b/cmd/tools/rest/client.go @@ -158,9 +158,9 @@ func (c *Client) invokeWithAuthRetry() ([]byte, error) { doInvoke := func() ([]byte, error) { var ( - response *http.Response - body []byte - err error + response *http.Response + innerBody []byte + innerErr error ) if c.request.Body != nil { @@ -174,23 +174,23 @@ func (c *Client) invokeWithAuthRetry() ([]byte, error) { restReq := c.request.URL.String() // send request to server - if response, err = c.client.Do(c.request); err != nil { - return nil, fmt.Errorf("connection error %w", err) + if response, innerErr = c.client.Do(c.request); innerErr != nil { + return nil, fmt.Errorf("connection error %w", innerErr) } //goland:noinspection GoUnhandledErrorResult defer response.Body.Close() + innerBody, innerErr = io.ReadAll(response.Body) + if innerErr != nil { + return nil, errs.Rest(response.StatusCode, innerErr.Error(), 0, "") + } if response.StatusCode != http.StatusOK { - body2, err2 := io.ReadAll(response.Body) - if err2 != nil { - return nil, errs.Rest(response.StatusCode, err2.Error(), 0, "") - } if response.StatusCode == http.StatusUnauthorized { return nil, errs.New(errs.ErrAuthFailed, response.Status) } - result := gjson.GetBytes(body2, "error") + result := gjson.GetBytes(innerBody, "error") if response.StatusCode == http.StatusForbidden { message := result.Get(Message).String() @@ -206,16 +206,9 @@ func (c *Client) invokeWithAuthRetry() ([]byte, error) { return nil, errs.Rest(response.StatusCode, "", 0, "") } - // read response body - if body, err = 
io.ReadAll(response.Body); err != nil { - return nil, err - } - defer c.printRequestAndResponse(restReq, body) + defer c.printRequestAndResponse(restReq, innerBody) - if err != nil { - return nil, err - } - return body, nil + return innerBody, nil } body, err = doInvoke() @@ -308,13 +301,13 @@ func (c *Client) Init(retries int) error { continue } - results := gjson.GetManyBytes(content, "name", "uuid", "version.full", "version.generation", "version.major", "version.minor") - c.cluster.Name = results[0].String() - c.cluster.UUID = results[1].String() - c.cluster.Info = results[2].String() - c.cluster.Version[0] = int(results[3].Int()) - c.cluster.Version[1] = int(results[4].Int()) - c.cluster.Version[2] = int(results[5].Int()) + results := gjson.ParseBytes(content) + c.cluster.Name = results.Get("name").String() + c.cluster.UUID = results.Get("uuid").String() + c.cluster.Info = results.Get("version.full").String() + c.cluster.Version[0] = int(results.Get("version.generation").Int()) + c.cluster.Version[1] = int(results.Get("version.major").Int()) + c.cluster.Version[2] = int(results.Get("version.minor").Int()) return nil } return err diff --git a/cmd/tools/rest/rest.go b/cmd/tools/rest/rest.go index a5be563ee..a42a402dc 100644 --- a/cmd/tools/rest/rest.go +++ b/cmd/tools/rest/rest.go @@ -450,16 +450,12 @@ func fetch(client *Client, href string, records *[]gjson.Result, downloadAll boo return fmt.Errorf("error making request %w", err) } - isNonIterRestCall := false - output := gjson.GetManyBytes(getRest, "records", "num_records", "_links.next.href") - data := output[0] - numRecords := output[1] - next := output[2] - if !data.Exists() { - isNonIterRestCall = true - } + output := gjson.ParseBytes(getRest) + data := output.Get("records") + numRecords := output.Get("num_records") + next := output.Get("_links.next.href") - if isNonIterRestCall { + if !data.Exists() { contentJSON := `{"records":[]}` response, err := sjson.SetRawBytes([]byte(contentJSON), "records.-1", 
getRest) if err != nil { @@ -503,11 +499,11 @@ func fetchAnalytics(client *Client, href string, records *[]gjson.Result, analyt return fmt.Errorf("error making request %w", err) } - output := gjson.GetManyBytes(getRest, "records", "num_records", "_links.next.href", "analytics") - data := output[0] - numRecords := output[1] - next := output[2] - *analytics = output[3] + output := gjson.ParseBytes(getRest) + data := output.Get("records") + numRecords := output.Get("num_records") + next := output.Get("_links.next.href") + *analytics = output.Get("analytics") // extract returned records since paginated records need to be merged into a single lists if numRecords.Exists() && numRecords.Int() > 0 { @@ -546,11 +542,10 @@ func FetchRestPerfData(client *Client, href string, perfRecords *[]PerfRecord) e } // extract returned records since paginated records need to be merged into a single list - output := gjson.GetManyBytes(getRest, "records", "num_records", "_links.next.href") - - data := output[0] - numRecords := output[1] - next := output[2] + output := gjson.ParseBytes(getRest) + data := output.Get("records") + numRecords := output.Get("num_records") + next := output.Get("_links.next.href") if numRecords.Exists() && numRecords.Int() > 0 { p := PerfRecord{Records: data, Timestamp: time.Now().UnixNano()} diff --git a/conf/rest/9.10.0/aggr.yaml b/conf/rest/9.10.0/aggr.yaml index 44475b8dd..6d0cd880b 100644 --- a/conf/rest/9.10.0/aggr.yaml +++ b/conf/rest/9.10.0/aggr.yaml @@ -40,6 +40,8 @@ counters: - space.efficiency_without_snapshots.savings => efficiency_savings_wo_snapshots - space.efficiency_without_snapshots_flexclones.logical_used => logical_used_wo_snapshots_flexclones - space.efficiency_without_snapshots_flexclones.savings => efficiency_savings_wo_snapshots_flexclones + - space.footprint => space_performance_tier_used + - space.footprint_percent => space_performance_tier_used_percent - space.snapshot.available => snapshot_size_available - 
space.snapshot.reserve_percent => snapshot_reserve_percent - space.snapshot.total => snapshot_size_total diff --git a/conf/rest/9.10.0/volume.yaml b/conf/rest/9.10.0/volume.yaml index 399be8bdb..544c286cd 100644 --- a/conf/rest/9.10.0/volume.yaml +++ b/conf/rest/9.10.0/volume.yaml @@ -93,6 +93,16 @@ endpoints: - filter: - privilege_level=diagnostic + - query: api/private/cli/volume/footprint + counters: + - ^^volume + - ^^vserver => svm + - volume_blocks_footprint_bin0 => performance_tier_footprint + - volume_blocks_footprint_bin0_percent => performance_tier_footprint_percent + - volume_blocks_footprint_bin1 => capacity_tier_footprint + - volume_blocks_footprint_bin1_percent => capacity_tier_footprint_percent + + plugins: - Volume: schedule: diff --git a/conf/rest/9.12.0/aggr.yaml b/conf/rest/9.12.0/aggr.yaml index 60918227e..4e83030eb 100644 --- a/conf/rest/9.12.0/aggr.yaml +++ b/conf/rest/9.12.0/aggr.yaml @@ -50,6 +50,8 @@ counters: - space.efficiency_without_snapshots.savings => efficiency_savings_wo_snapshots - space.efficiency_without_snapshots_flexclones.logical_used => logical_used_wo_snapshots_flexclones - space.efficiency_without_snapshots_flexclones.savings => efficiency_savings_wo_snapshots_flexclones + - space.footprint => space_performance_tier_used + - space.footprint_percent => space_performance_tier_used_percent - space.snapshot.available => snapshot_size_available - space.snapshot.reserve_percent => snapshot_reserve_percent - space.snapshot.total => snapshot_size_total diff --git a/conf/rest/9.9.0/volume.yaml b/conf/rest/9.9.0/volume.yaml index d2c9a6665..cdc5c3c9e 100644 --- a/conf/rest/9.9.0/volume.yaml +++ b/conf/rest/9.9.0/volume.yaml @@ -87,6 +87,16 @@ endpoints: - filter: - privilege_level=diagnostic + - query: api/private/cli/volume/footprint + counters: + - ^^volume + - ^^vserver => svm + - volume_blocks_footprint_bin0 => performance_tier_footprint + - volume_blocks_footprint_bin0_percent => performance_tier_footprint_percent + - 
volume_blocks_footprint_bin1 => capacity_tier_footprint + - volume_blocks_footprint_bin1_percent => capacity_tier_footprint_percent + + plugins: - Volume: schedule: diff --git a/conf/zapi/default.yaml b/conf/zapi/default.yaml index 2d1124b15..fd471c83b 100644 --- a/conf/zapi/default.yaml +++ b/conf/zapi/default.yaml @@ -38,4 +38,3 @@ objects: Support: support.yaml SVM: svm.yaml Volume: volume.yaml - diff --git a/container/onePollerPerContainer/docker-compose.tmpl b/container/onePollerPerContainer/docker-compose.tmpl index 1654461aa..9c564f948 100644 --- a/container/onePollerPerContainer/docker-compose.tmpl +++ b/container/onePollerPerContainer/docker-compose.tmpl @@ -26,7 +26,6 @@ services: command: '--poller {{ .PollerName }} {{if .Port }}--promPort {{ .Port }} {{ end }} {{- if ne .LogLevel 2 }}--loglevel {{ .LogLevel }} {{ end}}--config /opt/harvest.yml' volumes: - - {{ .TemplateDir }}:/opt/harvest/conf - {{ .CertDir }}:/opt/harvest/cert - {{ .ConfigFile }}:/opt/harvest.yml {{- range .Mounts}} diff --git a/docs/configure-harvest-basic.md b/docs/configure-harvest-basic.md index cf5b5699f..d926daec4 100644 --- a/docs/configure-harvest-basic.md +++ b/docs/configure-harvest-basic.md @@ -13,7 +13,8 @@ All pollers are defined in `harvest.yml`, the main configuration file of Harvest | `exporters` | **required** | List of exporter names from the `Exporters` section. Note: this should be the name of the exporter (e.g. `prometheus1`), not the value of the `exporter` key (e.g. `Prometheus`) | | | `auth_style` | required by Zapi* collectors | Either `basic_auth` or `certificate_auth` See [authentication](#authentication) for details | `basic_auth` | | `username`, `password` | required if `auth_style` is `basic_auth` | | | -| `ssl_cert`, `ssl_key` | optional if `auth_style` is `certificate_auth` | Absolute paths to SSL (client) certificate and key used to authenticate with the target system.

If not provided, the poller will look for `.key` and `.pem` in `$HARVEST_HOME/cert/`.

To create certificates for ONTAP systems, see [using certificate authentication](prepare-cdot-clusters.md#using-certificate-authentication) | | +| `ssl_cert`, `ssl_key` | optional if `auth_style` is `certificate_auth` | Paths to SSL (client) certificate and key used to authenticate with the target system.

If not provided, the poller will look for `.key` and `.pem` in `$HARVEST_HOME/cert/`.

To create certificates for ONTAP systems, see [using certificate authentication](prepare-cdot-clusters.md#using-certificate-authentication) | | +| `ca_cert` | optional if `auth_style` is `certificate_auth` | Path to file that contains PEM encoded certificates. Harvest will append these certificates to the system-wide set of root certificate authorities (CA).

If not provided, the OS's root CAs will be used.

To create certificates for ONTAP systems, see [using certificate authentication](prepare-cdot-clusters.md#using-certificate-authentication) | | | `use_insecure_tls` | optional, bool | If true, disable TLS verification when connecting to ONTAP cluster | false | | `credentials_file` | optional, string | Path to a yaml file that contains cluster credentials. The file should have the same shape as `harvest.yml`. See [here](configure-harvest-basic.md#credentials-file) for examples. Path can be relative to `harvest.yml` or absolute. | | | `credentials_script` | optional, section | Section that defines how Harvest should fetch credentials via external script. See [here](configure-harvest-basic.md#credentials-script) for details. | | @@ -62,6 +63,66 @@ Tools: #grafana_api_token: 'aaa-bbb-ccc-ddd' ``` +## Poller_files + +Harvest supports loading pollers from multiple files specified in the `Poller_files` section of your `harvest.yml` file. +For example, the following snippet tells harvest to load pollers from all the `*.yml` files under the `configs` directory, +and from the `path/to/single.yml` file. + +Paths may be relative or absolute. + +```yaml +Poller_files: + - configs/*.yml + - path/to/single.yml + +Pollers: + u2: + datacenter: dc-1 +``` + +Each referenced file can contain one or more unique pollers. +Ensure that you include the top-level `Pollers` section in these files. +All other top-level sections will be ignored. +For example: + +```yaml +# contents of configs/00-rtp.yml +Pollers: + ntap3: + datacenter: rtp + + ntap4: + datacenter: rtp +--- +# contents of configs/01-rtp.yml +Pollers: + ntap5: + datacenter: blr +--- +# contents of path/to/single.yml +Pollers: + ntap1: + datacenter: dc-1 + + ntap2: + datacenter: dc-1 +``` + +At runtime, all files will be read and combined into a single configuration. +The example above would result in the following set of pollers, in this order. 
+```yaml +- u2 +- ntap3 +- ntap4 +- ntap5 +- ntap1 +- ntap2 +``` + +When using glob patterns, the list of matching paths will be sorted before they are read. +Errors will be logged for all duplicate pollers and Harvest will refuse to start. + ## Configuring collectors Collectors are configured by their own configuration files ([templates](configure-templates.md)), which are stored in subdirectories @@ -175,7 +236,7 @@ At runtime, the `credentials_file` will be read and the included credentials wil matching cluster(s). This is handy when integrating with 3rd party credential stores. -See #884 for examples. +See [#884](https://github.com/NetApp/harvest/discussions/884) for examples. The format of the `credentials_file` is similar to `harvest.yml` and can contain multiple cluster credentials. diff --git a/docs/install/containers.md b/docs/install/containers.md index 5e673d8bc..dc9930906 100644 --- a/docs/install/containers.md +++ b/docs/install/containers.md @@ -77,6 +77,22 @@ docker run --rm \ --output harvest-compose.yml ``` +By default, the above command uses the harvest configuration file(`harvest.yml`) located in the current directory. If you want to use a harvest config from a different location. +??? question "What if my harvest configuration file is somewhere else or not named harvest.yml" + Use the following docker run command, updating the `HYML` variable with the absolute path to your `harvest.yml`. + + ```sh + HYML="/opt/custom_harvest.yml" \ + docker run --rm \ + --entrypoint "bin/harvest" \ + --volume "$(pwd):/opt/temp" \ + --volume "${HYML}:${HYML}" \ + ghcr.io/netapp/harvest:latest \ + generate docker full \ + --output harvest-compose.yml \ + --config "${HYML}" + ``` + `generate docker full` does two things: 1. 
Creates a Docker compose file with a container for each Harvest poller defined in your `harvest.yml` diff --git a/docs/install/k8.md b/docs/install/k8.md index 00fb05c39..3d6cadba6 100644 --- a/docs/install/k8.md +++ b/docs/install/k8.md @@ -5,11 +5,6 @@ The following steps are provided for reference purposes only. Depending on the s ### Requirements - [Kompose](https://github.com/kubernetes/kompose/): `v1.25` or higher -### Download and untar Harvest - -- Download the latest version of [Harvest](https://netapp.github.io/harvest/latest/install/native/), untar, and - cd into the harvest directory. - ## Deployment * [Local k8 Deployment](#local-k8-deployment) diff --git a/go.mod b/go.mod index 4287f862b..77cc1aeb0 100644 --- a/go.mod +++ b/go.mod @@ -8,10 +8,10 @@ require ( github.com/go-openapi/spec v0.20.9 github.com/hashicorp/go-version v1.6.0 github.com/olekukonko/tablewriter v0.0.5 - github.com/rs/zerolog v1.30.0 - github.com/shirou/gopsutil/v3 v3.23.8 + github.com/rs/zerolog v1.31.0 + github.com/shirou/gopsutil/v3 v3.23.9 github.com/spf13/cobra v1.7.0 - github.com/tidwall/gjson v1.16.0 + github.com/tidwall/gjson v1.17.0 github.com/tidwall/pretty v1.2.1 github.com/tidwall/sjson v1.2.5 github.com/zekroTJA/timedmap v1.5.1 diff --git a/go.sum b/go.sum index 56472a12d..659bece08 100644 --- a/go.sum +++ b/go.sum @@ -82,11 +82,15 @@ github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUz github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shirou/gopsutil/v3 v3.23.7 
h1:C+fHO8hfIppoJ1WdsVm1RoI0RwXoNdfTK7yWXV0wVj4= github.com/shirou/gopsutil/v3 v3.23.7/go.mod h1:c4gnmoRC0hQuaLqvxnx1//VXQ0Ms/X9UnJF8pddY5z4= github.com/shirou/gopsutil/v3 v3.23.8 h1:xnATPiybo6GgdRoC4YoGnxXZFRc3dqQTGi73oLvvBrE= github.com/shirou/gopsutil/v3 v3.23.8/go.mod h1:7hmCaBn+2ZwaZOr6jmPBZDfawwMGuo1id3C6aM8EDqQ= +github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E= +github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -112,6 +116,8 @@ github.com/tidwall/gjson v1.15.0 h1:5n/pM+v3r5ujuNl4YLZLsQ+UE5jlkLVm7jMzT5Mpolw= github.com/tidwall/gjson v1.15.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg= github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= diff --git a/grafana/dashboards/7mode/lun7.json b/grafana/dashboards/7mode/lun7.json index 32357d25e..75c5519db 100644 --- a/grafana/dashboards/7mode/lun7.json +++ b/grafana/dashboards/7mode/lun7.json @@ -1286,7 +1286,7 @@ "type": "table" } ], - "title": "LUN Table Drilldown", + "title": "LUN Table", "type": "row" }, { @@ -1849,7 +1849,7 @@ "type": "timeseries" } ], - "title": "Top LUN Performance Drilldown", + "title": "Top LUN Performance", "type": "row" 
}, { @@ -2068,7 +2068,7 @@ "type": "timeseries" } ], - "title": "Top LUN Performance Efficiency Drilldown", + "title": "Top LUN Performance Efficiency", "type": "row" }, { @@ -2352,7 +2352,7 @@ "type": "timeseries" } ], - "title": "Top Volume and LUN Capacity Drilldown", + "title": "Top Volume and LUN Capacity", "type": "row" }, { @@ -2743,7 +2743,7 @@ "type": "timeseries" } ], - "title": "Per LUN Drilldown (Must Select Node/SVM/Volume/LUN)", + "title": "Per LUN (Must Select Node/SVM/Volume/LUN)", "type": "row" } ], diff --git a/grafana/dashboards/7mode/network7.json b/grafana/dashboards/7mode/network7.json index 7073c8e44..bfa68d734 100644 --- a/grafana/dashboards/7mode/network7.json +++ b/grafana/dashboards/7mode/network7.json @@ -1352,7 +1352,7 @@ "type": "timeseries" } ], - "title": "Ethernet Drilldown", + "title": "Ethernet", "type": "row" }, { @@ -1975,7 +1975,7 @@ "type": "timeseries" } ], - "title": "FibreChannel Drilldown", + "title": "FibreChannel", "type": "row" } ], diff --git a/grafana/dashboards/7mode/node7.json b/grafana/dashboards/7mode/node7.json index e356ad4c5..b237f8fdc 100644 --- a/grafana/dashboards/7mode/node7.json +++ b/grafana/dashboards/7mode/node7.json @@ -1520,7 +1520,7 @@ "type": "timeseries" } ], - "title": "Backend Drilldown", + "title": "Backend", "type": "row" }, { @@ -1636,7 +1636,7 @@ "type": "timeseries" } ], - "title": "CPU Layer Drilldown", + "title": "CPU Layer", "type": "row" }, { @@ -2015,7 +2015,7 @@ "type": "timeseries" } ], - "title": "Network Layer Drilldown", + "title": "Network Layer", "type": "row" }, { @@ -2243,7 +2243,7 @@ "type": "timeseries" } ], - "title": "CIFS Frontend Drilldown", + "title": "CIFS Frontend", "type": "row" }, { @@ -2794,7 +2794,7 @@ "type": "timeseries" } ], - "title": "NFSv3 Frontend Drilldown", + "title": "NFSv3 Frontend", "type": "row" }, { @@ -3112,7 +3112,7 @@ "type": "timeseries" } ], - "title": "iSCSI Frontend Drilldown", + "title": "iSCSI Frontend", "type": "row" } ], diff --git 
a/grafana/dashboards/7mode/volume7.json b/grafana/dashboards/7mode/volume7.json index dc52f6477..0d69f7354 100644 --- a/grafana/dashboards/7mode/volume7.json +++ b/grafana/dashboards/7mode/volume7.json @@ -1506,7 +1506,7 @@ "type": "table" } ], - "title": "Volume Table Drilldown", + "title": "Volume Table", "type": "row" }, { @@ -2066,7 +2066,7 @@ "type": "timeseries" } ], - "title": "Backend WAFL Drilldown", + "title": "Backend WAFL", "type": "row" }, { diff --git a/grafana/dashboards/cmode/aggregate.json b/grafana/dashboards/cmode/aggregate.json index bc0d075bd..34730c409 100644 --- a/grafana/dashboards/cmode/aggregate.json +++ b/grafana/dashboards/cmode/aggregate.json @@ -3913,7 +3913,7 @@ "type": "timeseries" } ], - "title": "Flash Pool Drilldown", + "title": "Flash Pool", "type": "row" }, { @@ -3925,23 +3925,695 @@ "x": 0, "y": 57 }, - "id": 81, + "id": 810, "panels": [ { "datasource": "${DS_PROMETHEUS}", - "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 82 + }, + "id": 195, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, (aggr_space_performance_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggregatePerformanceTierFootprint\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{cluster}} - {{aggr}}", + "refId": "A" + } + ], + "title": "Top $TopResources Aggregates by Performance Tier Footprint", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { 
+ "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 82 + }, + "id": 197, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, (aggr_space_performance_tier_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggregatePerformanceTierFootprintPerc\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{cluster}} - {{aggr}}", + "refId": "A" + } + ], + "title": "Top $TopResources Aggregates by Performance Tier Footprint %", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + 
"decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 94 + }, + "id": 199, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, aggr_space_capacity_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$TopAggregateCapacityTierFootprint\"})", + "hide": false, + "interval": "", + "legendFormat": "{{cluster}} - {{aggr}}", + "refId": "A" + } + ], + "title": "Top $TopResources Aggregates by Capacity Tier Footprint", + "transformations": [], + "type": "timeseries" + } + ], + "title": "FabricPool", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 106 + }, + "id": 81, + "panels": [ + { + "datasource": "${DS_PROMETHEUS}", + "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "Space Used", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 107 + }, + "id": 83, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSizeUsed\"})", + "hide": false, + "interval": "", + "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "refId": "D" + } + ], + "title": "Top $TopResources Volumes by Space Used by Aggregate", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "Space Used %", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 107 + }, + "id": 84, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_size_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSizeUsedPercent\"})", + "hide": false, + "interval": "", + "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Space Used %", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "Snapshot Space Used", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 119 + }, + "id": 87, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_snapshots_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSnapshotSizeUsed\"})", + "hide": false, + "interval": "", + "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Snapshot Space Used", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.\n\nNote that in some scenarios, it is possible to exceed 100% of the space allocated.\n", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "Snapshot Space Used %", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 119 + }, + "id": 85, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_snapshot_reserve_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSnapshotSizeUsedPercent\"})", + "hide": false, + "interval": "", + "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "refId": "C" + } + ], + "title": "Top $TopResources Volumes by Snapshot Space Used %", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisLabel": "Space Used", + 
"axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, - "gradientMode": "opacity", + "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, @@ -3971,6 +4643,10 @@ { "color": "green", "value": null + }, + { + "color": "red", + "value": 80 } ] }, @@ -3982,9 +4658,9 @@ "h": 12, "w": 12, "x": 0, - "y": 82 + "y": 131 }, - "id": 83, + "id": 95, "options": { "legend": { "calcs": [ @@ -4006,27 +4682,27 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSizeUsed\"})", + "expr": "topk($TopResources, (volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumePerformanceTierFootprint\"}))", "hide": false, "interval": "", - "legendFormat": "{{aggr}} - {{volume}} - {{style}}", - "refId": "D" + "legendFormat": "{{volume}} ", + "refId": "A" } ], - "title": "Top $TopResources Volumes by Space Used by Aggregate", + "title": "Top $TopResources Volumes by Performance Tier Footprint", "transformations": [], "type": "timeseries" }, { "datasource": "${DS_PROMETHEUS}", - "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisLabel": "Space Used %", + "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", @@ -4077,9 +4753,9 @@ "h": 12, "w": 12, "x": 12, - "y": 82 + "y": 131 }, - "id": 84, + "id": 97, "options": { "legend": { "calcs": [ @@ -4101,27 +4777,27 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_size_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSizeUsedPercent\"})", + "expr": "topk($TopResources, (volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumePerformanceTierFootprintPerc\"}))", "hide": false, "interval": "", - "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "legendFormat": "{{volume}}", "refId": "A" } ], - "title": "Top $TopResources Volumes by Space Used %", + "title": "Top $TopResources Volumes by Performance Tier Footprint %", "transformations": [], "type": "timeseries" }, { "datasource": "${DS_PROMETHEUS}", - "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisLabel": "Snapshot Space Used", + "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", @@ -4171,9 +4847,9 @@ "h": 12, "w": 12, "x": 0, - "y": 94 + "y": 143 }, - "id": 87, + "id": 99, "options": { "legend": { "calcs": [ @@ -4195,27 +4871,27 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_snapshots_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSnapshotSizeUsed\"})", + "expr": "topk($TopResources, volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumeCapacityTierFootprint\"})", "hide": false, "interval": "", - "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "legendFormat": "{{volume}} ", "refId": "A" } ], - "title": "Top $TopResources Volumes by Snapshot Space Used", + "title": "Top $TopResources Volumes by Capacity Tier Footprint", "transformations": [], "type": "timeseries" }, { "datasource": "${DS_PROMETHEUS}", - "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.\n\nNote that in some scenarios, it is possible to exceed 100% of the space allocated.\n", + "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisLabel": "Snapshot Space Used %", + "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", @@ -4266,9 +4942,9 @@ "h": 12, "w": 12, "x": 12, - "y": 94 + "y": 143 }, - "id": 85, + "id": 101, "options": { "legend": { "calcs": [ @@ -4290,14 +4966,14 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_snapshot_reserve_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSnapshotSizeUsedPercent\"})", + "expr": "topk($TopResources, volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumeCapacityTierFootprintPerc\"})", "hide": false, "interval": "", - "legendFormat": "{{aggr}} - {{volume}} - {{style}}", - "refId": "C" + "legendFormat": "{{volume}}", + "refId": "A" } ], - "title": "Top $TopResources Volumes by Snapshot Space Used %", + "title": "Top $TopResources Volumes by Capacity Tier Footprint %", "transformations": [], "type": "timeseries" } @@ -4312,7 +4988,7 @@ "h": 1, "w": 24, "x": 0, - "y": 58 + "y": 155 }, "id": 28, "panels": [ @@ -4374,7 +5050,7 @@ "h": 9, "w": 8, "x": 0, - "y": 6 + "y": 156 }, "id": 88, "options": { @@ -4463,7 +5139,7 @@ "h": 9, "w": 8, "x": 8, - "y": 6 + "y": 156 }, "id": 89, "options": { @@ -4553,7 +5229,7 @@ "h": 9, "w": 8, "x": 16, - "y": 6 + "y": 156 }, "id": 90, "options": { @@ -4642,7 +5318,7 @@ "h": 9, "w": 8, "x": 0, - "y": 15 + "y": 165 }, "id": 91, "options": { @@ -4731,7 +5407,7 @@ "h": 9, "w": 8, "x": 8, - "y": 15 + "y": 165 }, "id": 92, "options": { @@ -4820,7 +5496,7 @@ "h": 9, "w": 8, "x": 16, - "y": 15 + "y": 165 }, "id": 93, "options": { @@ -4852,7 +5528,7 @@ "type": "timeseries" } ], - 
"title": "Busy Volumes Drilldown", + "title": "Busy Volumes", "type": "row" } ], @@ -5333,6 +6009,167 @@ "skipUrlSync": false, "sort": 0, "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumeCapacityTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumePerformanceTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + 
"name": "TopVolumeCapacityTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumePerformanceTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(aggr_space_capacity_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$Aggregate\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopAggregateCapacityTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(aggr_space_capacity_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$Aggregate\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*aggr=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": 
"${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(aggr_space_performance_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$Aggregate\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopAggregatePerformanceTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(aggr_space_performance_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$Aggregate\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*aggr=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(aggr_space_performance_tier_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$Aggregate\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopAggregatePerformanceTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(aggr_space_performance_tier_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$Aggregate\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*aggr=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" } ] }, diff --git a/grafana/dashboards/cmode/cluster.json b/grafana/dashboards/cmode/cluster.json index 8ad681865..fad7836bb 100644 --- a/grafana/dashboards/cmode/cluster.json +++ b/grafana/dashboards/cmode/cluster.json @@ -3967,7 +3967,7 @@ "type": "timeseries" } ], - "title": "SVM Performance Drilldown", + "title": "SVM Performance", "type": "row" } ], diff --git a/grafana/dashboards/cmode/lun.json b/grafana/dashboards/cmode/lun.json index 896335bc6..661f08513 
100644 --- a/grafana/dashboards/cmode/lun.json +++ b/grafana/dashboards/cmode/lun.json @@ -1415,7 +1415,7 @@ "type": "table" } ], - "title": "LUN Table Drilldown", + "title": "LUN Table", "type": "row" }, { @@ -1978,7 +1978,7 @@ "type": "timeseries" } ], - "title": "Top LUN Performance Drilldown", + "title": "Top LUN Performance", "type": "row" }, { @@ -2299,7 +2299,7 @@ "type": "timeseries" } ], - "title": "Top LUN Performance Efficiency Drilldown", + "title": "Top LUN Performance Efficiency", "type": "row" }, { @@ -3891,7 +3891,7 @@ "type": "timeseries" } ], - "title": "Top Volume and LUN Capacity Drilldown", + "title": "Top Volume and LUN Capacity", "type": "row" }, { @@ -4526,7 +4526,7 @@ "type": "timeseries" } ], - "title": "Per LUN Drilldown (Must Select Cluster/SVM/Volume/LUN)", + "title": "Per LUN (Must Select Cluster/SVM/Volume/LUN)", "type": "row" } ], diff --git a/grafana/dashboards/cmode/mcc_cluster.json b/grafana/dashboards/cmode/mcc_cluster.json index 5e21b3790..26a73ed5f 100644 --- a/grafana/dashboards/cmode/mcc_cluster.json +++ b/grafana/dashboards/cmode/mcc_cluster.json @@ -1609,7 +1609,7 @@ } ], "repeat": null, - "title": "MetroCluster FCVI Drilldown", + "title": "MetroCluster FCVI", "type": "row" }, { @@ -1997,7 +1997,7 @@ } ], "repeat": null, - "title": "MetroCluster Iwarp Drilldown", + "title": "MetroCluster Iwarp", "type": "row" }, { @@ -2622,7 +2622,7 @@ } ], "repeat": null, - "title": "MetroCluster Disk Drilldown", + "title": "MetroCluster Disk", "type": "row" }, { @@ -2824,7 +2824,7 @@ } ], "repeat": null, - "title": "Disk and Tape Adapter Drilldown", + "title": "Disk and Tape Adapter", "type": "row" }, { @@ -3392,7 +3392,7 @@ } ], "repeat": null, - "title": "MetroCluster FibreBridge/Array Drilldown", + "title": "MetroCluster FibreBridge/Array", "type": "row" } ], diff --git a/grafana/dashboards/cmode/metadata.json b/grafana/dashboards/cmode/metadata.json index 8e8b0d7d2..ec88df431 100644 --- a/grafana/dashboards/cmode/metadata.json +++ 
b/grafana/dashboards/cmode/metadata.json @@ -3795,7 +3795,7 @@ "type": "timeseries" } ], - "title": "Collectors Drilldown", + "title": "Collectors", "type": "row" }, { @@ -4030,7 +4030,7 @@ "type": "timeseries" } ], - "title": "Prometheus Drilldown", + "title": "Prometheus", "type": "row" } ], diff --git a/grafana/dashboards/cmode/namespace.json b/grafana/dashboards/cmode/namespace.json index 46ce473bb..b072d6501 100644 --- a/grafana/dashboards/cmode/namespace.json +++ b/grafana/dashboards/cmode/namespace.json @@ -88,7 +88,7 @@ }, "id": 28, "panels": [], - "title": "Top NVMe Namespaces Performance Drilldown", + "title": "Top NVMe Namespaces Performance", "type": "row" }, { @@ -899,7 +899,7 @@ "type": "table" } ], - "title": "NVMe Namespaces Table Drilldown", + "title": "NVMe Namespaces Table", "type": "row" } ], diff --git a/grafana/dashboards/cmode/network.json b/grafana/dashboards/cmode/network.json index d7846b639..77097072a 100644 --- a/grafana/dashboards/cmode/network.json +++ b/grafana/dashboards/cmode/network.json @@ -1399,7 +1399,7 @@ "type": "timeseries" } ], - "title": "Ethernet Drilldown", + "title": "Ethernet", "type": "row" }, { @@ -2457,7 +2457,7 @@ "type": "timeseries" } ], - "title": "FibreChannel Drilldown", + "title": "FibreChannel", "type": "row" }, { @@ -3018,7 +3018,7 @@ "type": "timeseries" } ], - "title": "NVMe/FC Drilldown", + "title": "NVMe/FC", "type": "row" }, { diff --git a/grafana/dashboards/cmode/nfs4storePool.json b/grafana/dashboards/cmode/nfs4storePool.json index 97ddcc551..65eed5f23 100644 --- a/grafana/dashboards/cmode/nfs4storePool.json +++ b/grafana/dashboards/cmode/nfs4storePool.json @@ -2000,7 +2000,7 @@ "type": "timeseries" } ], - "title": "Lock Drilldown", + "title": "Lock", "type": "row" } ], diff --git a/grafana/dashboards/cmode/node.json b/grafana/dashboards/cmode/node.json index b551b8aae..396babd25 100644 --- a/grafana/dashboards/cmode/node.json +++ b/grafana/dashboards/cmode/node.json @@ -1294,7 +1294,7 @@ "type": 
"timeseries" } ], - "title": "CPU Layer Drilldown", + "title": "CPU Layer", "type": "row" }, { @@ -1673,7 +1673,7 @@ "type": "timeseries" } ], - "title": "Network Layer Drilldown", + "title": "Network Layer", "type": "row" }, { @@ -2366,7 +2366,7 @@ "type": "timeseries" } ], - "title": "Backend Drilldown", + "title": "Backend", "type": "row" }, { @@ -3112,7 +3112,7 @@ "type": "timeseries" } ], - "title": "NFSv3 Frontend Drilldown", + "title": "NFSv3 Frontend", "type": "row" }, { @@ -3456,7 +3456,7 @@ "type": "timeseries" } ], - "title": "CIFS Frontend Drilldown", + "title": "CIFS Frontend", "type": "row" }, { @@ -3870,7 +3870,7 @@ "type": "timeseries" } ], - "title": "FCP Frontend Drilldown", + "title": "FCP Frontend", "type": "row" }, { @@ -4286,7 +4286,7 @@ "type": "timeseries" } ], - "title": "NVMe/FC Frontend Drilldown", + "title": "NVMe/FC Frontend", "type": "row" }, { @@ -4700,7 +4700,7 @@ "type": "timeseries" } ], - "title": "iSCSI Frontend Drilldown", + "title": "iSCSI Frontend", "type": "row" } ], diff --git a/grafana/dashboards/cmode/s3ObjectStorage.json b/grafana/dashboards/cmode/s3ObjectStorage.json index a36fe1dc6..edde1e1a9 100644 --- a/grafana/dashboards/cmode/s3ObjectStorage.json +++ b/grafana/dashboards/cmode/s3ObjectStorage.json @@ -112,7 +112,7 @@ "y": 3 }, "id": 16, - "title": "Bucket Drilldown", + "title": "Bucket", "type": "row" }, { diff --git a/grafana/dashboards/cmode/svm.json b/grafana/dashboards/cmode/svm.json index ad1e67e76..361fee0dc 100644 --- a/grafana/dashboards/cmode/svm.json +++ b/grafana/dashboards/cmode/svm.json @@ -1506,7 +1506,7 @@ "type": "timeseries" } ], - "title": "Volumes Performance Drilldown", + "title": "Volumes Performance", "type": "row" }, { @@ -2203,7 +2203,7 @@ "type": "timeseries" } ], - "title": "LIF Drilldown", + "title": "LIF", "type": "row" }, { @@ -3265,7 +3265,7 @@ "type": "timeseries" } ], - "title": "CIFS Drilldown", + "title": "CIFS", "type": "row" }, { @@ -4326,7 +4326,7 @@ "type": "timeseries" } ], - 
"title": "FCP Drilldown", + "title": "FCP", "type": "row" }, { @@ -5273,7 +5273,7 @@ "type": "timeseries" } ], - "title": "iSCSI Drilldown", + "title": "iSCSI", "type": "row" }, { @@ -6556,7 +6556,7 @@ "type": "timeseries" } ], - "title": "NFSv3 Drilldown", + "title": "NFSv3", "type": "row" }, { @@ -7856,7 +7856,7 @@ "type": "timeseries" } ], - "title": "NFSv4 Drilldown", + "title": "NFSv4", "type": "row" }, { @@ -9164,7 +9164,7 @@ "type": "timeseries" } ], - "title": "NFSv4.1 Drilldown", + "title": "NFSv4.1", "type": "row" }, { @@ -10226,7 +10226,7 @@ "type": "timeseries" } ], - "title": "NVMe/FC Drilldown", + "title": "NVMe/FC", "type": "row" }, { @@ -10480,7 +10480,7 @@ "type": "timeseries" } ], - "title": "Copy Offload Drilldown", + "title": "Copy Offload", "type": "row" }, { @@ -11417,7 +11417,7 @@ "type": "timeseries" } ], - "title": "QoS Policy Group Drilldown", + "title": "QoS Policy Group", "type": "row" }, { @@ -12242,7 +12242,7 @@ "type": "timeseries" } ], - "title": "QoS Policy Group Latency from Resource Drilldown", + "title": "QoS Policy Group Latency from Resource", "type": "row" }, { @@ -12886,7 +12886,7 @@ "type": "timeseries" } ], - "title": "Volume Capacity Drilldown", + "title": "Volume Capacity", "type": "row" }, { diff --git a/grafana/dashboards/cmode/volume.json b/grafana/dashboards/cmode/volume.json index dd12e165c..9e31e63a5 100644 --- a/grafana/dashboards/cmode/volume.json +++ b/grafana/dashboards/cmode/volume.json @@ -1675,7 +1675,7 @@ "type": "table" } ], - "title": "Volume Table Drilldown", + "title": "Volume Table", "type": "row" }, { @@ -2602,7 +2602,7 @@ "type": "timeseries" } ], - "title": "Volume WAFL Layer Drilldown", + "title": "Volume WAFL Layer", "type": "row" }, { @@ -3356,7 +3356,7 @@ "type": "timeseries" } ], - "title": "Top Volume End-to-End QoS Drilldown", + "title": "Top Volume End-to-End QoS", "type": "row" }, { @@ -4383,7 +4383,7 @@ "type": "timeseries" } ], - "title": "Top Volume QoS Resource Latency Drilldown", + 
"title": "Top Volume QoS Resource Latency", "type": "row" }, { @@ -5320,7 +5320,7 @@ "type": "table" } ], - "title": "Top Volume FabricPool Drilldown", + "title": "Top Volume Object Storage", "type": "row" }, { @@ -5332,6 +5332,399 @@ "x": 0, "y": 19 }, + "id": 99, + "panels": [ + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 119, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, (volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",svm=~\"$SVM\", volume=~\"$TopVolumePerformanceTierFootprint\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{svm}} - {{volume}} ", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Performance Tier Footprint", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 120, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, (volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",svm=~\"$SVM\", volume=~\"$TopVolumePerformanceTierFootprintPerc\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{svm}} - {{volume}} ", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Performance Tier Footprint %", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 121, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",svm=~\"$SVM\", volume=~\"$TopVolumeCapacityTierFootprint\"})", + "hide": false, + "interval": "", + "legendFormat": "{{svm}} - {{volume}} ", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Capacity Tier Footprint", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": 
[ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 122, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",svm=~\"$SVM\", volume=~\"$TopVolumeCapacityTierFootprintPerc\"})", + "hide": false, + "interval": "", + "legendFormat": "{{svm}} - {{volume}} ", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Capacity Tier Footprint %", + "transformations": [], + "type": "timeseries" + } + ], + "title": "Top Volume FabricPool", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 20 + }, "id": 98, "panels": [ { @@ -5391,7 +5784,7 @@ "h": 8, "w": 12, "x": 0, - "y": 20 + "y": 21 }, "id": 100, "options": { @@ -5478,7 +5871,7 @@ "h": 8, "w": 12, "x": 12, - "y": 20 + "y": 21 }, "id": 102, "options": { @@ -5566,7 +5959,7 @@ "h": 8, "w": 24, "x": 0, - "y": 28 + "y": 29 }, "id": 101, "options": { @@ -5596,7 +5989,7 @@ "type": "timeseries" } ], - "title": "Top Inode Drilldown", + "title": "Top Inode", "type": "row" }, { @@ -5606,7 +5999,7 @@ "h": 1, "w": 24, "x": 0, - "y": 20 + "y": 21 }, "id": 105, "panels": [ @@ -5616,7 +6009,7 @@ "h": 2, "w": 24, "x": 0, - "y": 21 + "y": 22 }, "id": 110, "options": { @@ -5687,7 +6080,7 @@ "h": 8, "w": 8, "x": 0, - "y": 23 + "y": 24 }, "id": 108, "options": { @@ -5779,7 +6172,7 @@ "h": 8, "w": 8, "x": 8, - "y": 23 + "y": 24 }, "id": 106, "options": { @@ -5871,7 +6264,7 @@ "h": 8, "w": 8, "x": 16, - "y": 23 + "y": 24 }, 
"id": 107, "options": { @@ -7077,6 +7470,98 @@ "skipUrlSync": false, "sort": 0, "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumeCapacityTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumePerformanceTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", 
+ "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumeCapacityTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumePerformanceTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" } ] }, diff --git a/grafana/dashboards/influxdb/metadata.json b/grafana/dashboards/influxdb/metadata.json index 46af7f01b..6c5b0b728 100644 --- a/grafana/dashboards/influxdb/metadata.json +++ b/grafana/dashboards/influxdb/metadata.json @@ -3842,7 +3842,7 @@ }, "id": 60, "panels": [], - "title": "Collectors Drilldown", + "title": "Collectors", "type": "row" }, { diff --git a/grafana/dashboards/influxdb/network.json b/grafana/dashboards/influxdb/network.json index a626ca4b4..748d7d02f 100644 --- a/grafana/dashboards/influxdb/network.json +++ b/grafana/dashboards/influxdb/network.json @@ -79,7 +79,7 @@ }, "id": 105, 
"panels": [], - "title": "Ethernet Drilldown", + "title": "Ethernet", "type": "row" }, { @@ -3956,7 +3956,7 @@ } } ], - "title": "FibreChannel Drilldown", + "title": "FibreChannel", "type": "row" } ], diff --git a/grafana/dashboards/influxdb/snapmirror.json b/grafana/dashboards/influxdb/snapmirror.json index 41309af29..354b7f939 100644 --- a/grafana/dashboards/influxdb/snapmirror.json +++ b/grafana/dashboards/influxdb/snapmirror.json @@ -1940,7 +1940,7 @@ } } ], - "title": "Destination Node DrillDown", + "title": "Destination Node", "type": "row" }, { @@ -3190,7 +3190,7 @@ } } ], - "title": "Destination SVM Drilldown", + "title": "Destination SVM", "type": "row" } ], diff --git a/grafana/dashboards/influxdb/svm.json b/grafana/dashboards/influxdb/svm.json index 92f323ae0..ee0a39da2 100644 --- a/grafana/dashboards/influxdb/svm.json +++ b/grafana/dashboards/influxdb/svm.json @@ -73,7 +73,7 @@ }, "id": 37, "panels": [], - "title": "NFS${NFSv} Frontend Drilldown", + "title": "NFS${NFSv} Frontend", "type": "row" }, { diff --git a/harvest.cue b/harvest.cue index ae33e119d..3b8b21345 100644 --- a/harvest.cue +++ b/harvest.cue @@ -50,8 +50,8 @@ label: [string]: string } #CertificateScript: { - path: string - timeout?: string + path: string + timeout?: string } #CredentialsScript: { @@ -69,11 +69,13 @@ Pollers: [Name=_]: #Poller #Poller: { addr?: string auth_style?: "basic_auth" | "certificate_auth" + ca_cert?: string + certificate_script?: #CertificateScript client_timeout?: string collectors?: [...#CollectorDef] | [...string] + conf_path?: string credentials_file?: string credentials_script?: #CredentialsScript - certificate_script?: #CertificateScript datacenter?: string exporters: [...string] is_kfs?: bool diff --git a/pkg/conf/conf.go b/pkg/conf/conf.go index ae14956f0..64c52cbdc 100644 --- a/pkg/conf/conf.go +++ b/pkg/conf/conf.go @@ -6,6 +6,7 @@ package conf import ( "dario.cat/mergo" + "errors" "fmt" "github.com/netapp/harvest/v2/pkg/errs" 
"github.com/netapp/harvest/v2/pkg/tree/node" @@ -15,6 +16,7 @@ import ( "os" "path/filepath" "regexp" + "sort" "strconv" ) @@ -31,7 +33,7 @@ const ( HomeEnvVar = "HARVEST_CONF" ) -// TestLoadHarvestConfig is used by testing code to reload a new config +// TestLoadHarvestConfig loads a new config - used by testing code func TestLoadHarvestConfig(configPath string) { configRead = false Config = HarvestConfig{} @@ -59,11 +61,17 @@ func ConfigPath(path string) string { } func LoadHarvestConfig(configPath string) (string, error) { + var ( + contents []byte + duplicates []error + err error + ) + configPath = ConfigPath(configPath) if configRead { return configPath, nil } - contents, err := os.ReadFile(configPath) + contents, err = os.ReadFile(configPath) if err != nil { return "", fmt.Errorf("error reading %s err=%w", configPath, err) @@ -73,27 +81,86 @@ func LoadHarvestConfig(configPath string) (string, error) { fmt.Printf("error unmarshalling config file=[%s] %+v\n", configPath, err) return "", err } + + for _, pat := range Config.PollerFiles { + fs, err := filepath.Glob(pat) + if err != nil { + return "", fmt.Errorf("error retrieving poller_files path=%s err=%w", pat, err) + } + + sort.Strings(fs) + + if len(fs) == 0 { + fmt.Printf("add 0 poller(s) from poller_file=%s because no matching paths\n", pat) + continue + } + + for _, filename := range fs { + fsContents, err := os.ReadFile(filename) + if err != nil { + return "", fmt.Errorf("error reading poller_file=%s err=%w", filename, err) + } + cfg, err := unmarshalConfig(fsContents) + if err != nil { + return "", fmt.Errorf("error unmarshalling poller_file=%s err=%w", filename, err) + } + for _, pName := range cfg.PollersOrdered { + _, ok := Config.Pollers[pName] + if ok { + duplicates = append(duplicates, fmt.Errorf("poller name=%s from poller_file=%s is not unique", pName, filename)) + continue + } + Config.Pollers[pName] = cfg.Pollers[pName] + Config.PollersOrdered = append(Config.PollersOrdered, pName) + } + 
fmt.Printf("add %d poller(s) from poller_file=%s\n", len(cfg.PollersOrdered), filename) + } + } + + if len(duplicates) > 0 { + return "", errors.Join(duplicates...) + } + + // Fix promIndex for combined pollers + for i, name := range Config.PollersOrdered { + Config.Pollers[name].promIndex = i + } return configPath, nil } -func DecodeConfig(contents []byte) error { - err := yaml.Unmarshal(contents, &Config) - configRead = true +func unmarshalConfig(contents []byte) (*HarvestConfig, error) { + var ( + cfg HarvestConfig + orderedConfig OrderedConfig + err error + ) + + err = yaml.Unmarshal(contents, &cfg) if err != nil { - return fmt.Errorf("error unmarshalling config err: %w", err) + return nil, fmt.Errorf("error unmarshalling config: %w", err) } - // Until https://github.com/go-yaml/yaml/issues/717 is fixed - // read the yaml again to determine poller order - orderedConfig := OrderedConfig{} + + // Read the yaml again to determine poller order err = yaml.Unmarshal(contents, &orderedConfig) if err != nil { - return err + return nil, fmt.Errorf("error unmarshalling ordered config: %w", err) } - Config.PollersOrdered = orderedConfig.Pollers.namesInOrder + cfg.PollersOrdered = orderedConfig.Pollers.namesInOrder for i, name := range Config.PollersOrdered { Config.Pollers[name].promIndex = i } + return &cfg, nil +} + +func DecodeConfig(contents []byte) error { + cfg, err := unmarshalConfig(contents) + configRead = true + if err != nil { + return fmt.Errorf("error unmarshalling config err: %w", err) + } + Config = *cfg + // Merge pollers and defaults pollers := Config.Pollers defaults := Config.Defaults @@ -293,8 +360,8 @@ func (i *IntRange) UnmarshalYAML(node *yaml.Node) error { return nil } -// GetUniqueExporters returns the unique set of exporter types from the list of export names -// For example: If 2 prometheus exporters are configured for a poller, the last one is returned +// GetUniqueExporters returns the unique set of exporter types from the list of export 
names. +// For example, if two prometheus exporters are configured for a poller, the last one is returned func GetUniqueExporters(exporterNames []string) []string { var resultExporters []string definedExporters := Config.Exporters @@ -572,6 +639,7 @@ type HarvestConfig struct { Tools *Tools `yaml:"Tools,omitempty"` Exporters map[string]Exporter `yaml:"Exporters,omitempty"` Pollers map[string]*Poller `yaml:"Pollers,omitempty"` + PollerFiles []string `yaml:"Poller_files,omitempty"` Defaults *Poller `yaml:"Defaults,omitempty"` Admin Admin `yaml:"Admin,omitempty"` PollersOrdered []string // poller names in same order as yaml config diff --git a/pkg/conf/conf_test.go b/pkg/conf/conf_test.go index 5e5a49da2..51d30b234 100644 --- a/pkg/conf/conf_test.go +++ b/pkg/conf/conf_test.go @@ -5,6 +5,7 @@ import ( "reflect" "sort" "strconv" + "strings" "testing" ) @@ -284,8 +285,7 @@ func TestNodeToPoller(t *testing.T) { func TestReadHarvestConfigFromEnv(t *testing.T) { t.Helper() - configRead = false - Config = HarvestConfig{} + resetConfig() t.Setenv(HomeEnvVar, "testdata") cp, err := LoadHarvestConfig(HarvestYML) if err != nil { @@ -301,3 +301,58 @@ func TestReadHarvestConfigFromEnv(t *testing.T) { t.Errorf("check if star poller exists. 
got=nil want=poller") } } + +func resetConfig() { + configRead = false + Config = HarvestConfig{} +} + +func TestMultiplePollerFiles(t *testing.T) { + t.Helper() + resetConfig() + _, err := LoadHarvestConfig("testdata/pollerFiles/harvest.yml") + + wantNumErrs := 2 + numErrs := strings.Count(err.Error(), "\n") + 1 + if numErrs != wantNumErrs { + t.Errorf("got %d errors, want %d", numErrs, wantNumErrs) + } + + wantNumPollers := 10 + if len(Config.Pollers) != wantNumPollers { + t.Errorf("got %d pollers, want %d", len(Config.Pollers), wantNumPollers) + } + + if len(Config.PollersOrdered) != wantNumPollers { + t.Errorf("got %d ordered pollers, want %d", len(Config.PollersOrdered), wantNumPollers) + } + + wantToken := "token" + if Config.Tools.GrafanaAPIToken != wantToken { + t.Errorf("got token=%s, want token=%s", Config.Tools.GrafanaAPIToken, wantToken) + } + + orderWanted := []string{ + "star", + "netapp1", + "netapp2", + "netapp3", + "netapp4", + "netapp5", + "netapp6", + "netapp7", + "netapp8", + "moon", + } + + for i, n := range orderWanted { + named, err := PollerNamed(n) + if err != nil { + t.Errorf("got no poller, want poller named=%s", n) + continue + } + if named.promIndex != i { + t.Errorf("got promIndex=%d, want promIndex=%d", named.promIndex, i) + } + } +} diff --git a/pkg/conf/testdata/pollerFiles/dup.yml b/pkg/conf/testdata/pollerFiles/dup.yml new file mode 100644 index 000000000..7c1c5be71 --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/dup.yml @@ -0,0 +1,4 @@ + +Pollers: + star: + addr: localhost diff --git a/pkg/conf/testdata/pollerFiles/harvest.yml b/pkg/conf/testdata/pollerFiles/harvest.yml new file mode 100644 index 000000000..8e744cc64 --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/harvest.yml @@ -0,0 +1,16 @@ +Tools: + grafana_api_token: token + +Poller_files: + - testdata/pollerFiles/many/*.yml + - testdata/pollerFiles/single.yml + - testdata/pollerFiles/missing1.yml + - testdata/pollerFiles/missing2.yml + - testdata/pollerFiles/single.yml # 
will cause duplicate because it is listed twice + - testdata/pollerFiles/dup.yml # will cause duplicate because it contains star again + +Pollers: + star: + addr: localhost + collectors: + - Simple diff --git a/pkg/conf/testdata/pollerFiles/many/00.yml b/pkg/conf/testdata/pollerFiles/many/00.yml new file mode 100644 index 000000000..06761669f --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/many/00.yml @@ -0,0 +1,16 @@ +Pollers: + netapp1: + datacenter: rtp + addr: 1.1.1.1 + netapp2: + datacenter: rtp + addr: 1.1.1.2 + netapp3: + datacenter: rtp + addr: 1.1.1.3 + netapp4: + datacenter: rtp + addr: 1.1.1.4 + +Tools: + grafana_api_token: ignore diff --git a/pkg/conf/testdata/pollerFiles/many/b.yml b/pkg/conf/testdata/pollerFiles/many/b.yml new file mode 100644 index 000000000..08a8bae2d --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/many/b.yml @@ -0,0 +1,13 @@ +Pollers: + netapp5: + datacenter: blr + addr: 1.1.1.5 + netapp6: + datacenter: blr + addr: 1.1.1.6 + netapp7: + datacenter: blr + addr: 1.1.1.7 + netapp8: + datacenter: blr + addr: 1.1.1.8 diff --git a/pkg/conf/testdata/pollerFiles/many/nomatch.yaml b/pkg/conf/testdata/pollerFiles/many/nomatch.yaml new file mode 100644 index 000000000..06761669f --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/many/nomatch.yaml @@ -0,0 +1,16 @@ +Pollers: + netapp1: + datacenter: rtp + addr: 1.1.1.1 + netapp2: + datacenter: rtp + addr: 1.1.1.2 + netapp3: + datacenter: rtp + addr: 1.1.1.3 + netapp4: + datacenter: rtp + addr: 1.1.1.4 + +Tools: + grafana_api_token: ignore diff --git a/pkg/conf/testdata/pollerFiles/single.yml b/pkg/conf/testdata/pollerFiles/single.yml new file mode 100644 index 000000000..fa45e5fab --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/single.yml @@ -0,0 +1,14 @@ + +Poller_files: # these will be ignored since they are in single.yml + - testdata/pollerFiles/many/*.yml + - testdata/pollerFiles/single.yml + - testdata/pollerFiles/missing1.yml + - testdata/pollerFiles/missing2.yml + - 
testdata/pollerFiles/missing3.yml + - testdata/pollerFiles/single.yml + +Pollers: + moon: + addr: localhost + collectors: + - Simple diff --git a/vendor/github.com/rs/zerolog/README.md b/vendor/github.com/rs/zerolog/README.md index b83ae159d..972b729fb 100644 --- a/vendor/github.com/rs/zerolog/README.md +++ b/vendor/github.com/rs/zerolog/README.md @@ -1,6 +1,6 @@ # Zero Allocation JSON Logger -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog) +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://github.com/rs/zerolog/actions/workflows/test.yml/badge.svg)](https://github.com/rs/zerolog/actions/workflows/test.yml) [![Go Coverage](https://github.com/rs/zerolog/wiki/coverage.svg)](https://raw.githack.com/wiki/rs/zerolog/coverage.html) The zerolog package provides a fast and simple logger dedicated to JSON output. @@ -499,7 +499,7 @@ log.Ctx(ctx).Info().Msg("hello world") ### Set as standard logger output ```go -stdlog := zerolog.New(os.Stdout).With(). +log := zerolog.New(os.Stdout).With(). Str("foo", "bar"). Logger() @@ -694,7 +694,7 @@ with zerolog library is [CSD](https://github.com/toravir/csd/). ## Benchmarks -See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks. +See [logbench](http://bench.zerolog.io/) for more comprehensive and up-to-date benchmarks. 
All operations are allocation free (those numbers *include* JSON encoding): diff --git a/vendor/github.com/rs/zerolog/console.go b/vendor/github.com/rs/zerolog/console.go index 8b0e0c619..282798853 100644 --- a/vendor/github.com/rs/zerolog/console.go +++ b/vendor/github.com/rs/zerolog/console.go @@ -312,6 +312,11 @@ func needsQuote(s string) bool { // colorize returns the string s wrapped in ANSI code c, unless disabled is true. func colorize(s interface{}, c int, disabled bool) string { + e := os.Getenv("NO_COLOR") + if e != "" { + disabled = true + } + if disabled { return fmt.Sprintf("%s", s) } diff --git a/vendor/github.com/rs/zerolog/context.go b/vendor/github.com/rs/zerolog/context.go index 9d860e507..fc62ad9c1 100644 --- a/vendor/github.com/rs/zerolog/context.go +++ b/vendor/github.com/rs/zerolog/context.go @@ -57,7 +57,7 @@ func (c Context) Array(key string, arr LogArrayMarshaler) Context { // Object marshals an object that implement the LogObjectMarshaler interface. func (c Context) Object(key string, obj LogObjectMarshaler) Context { - e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e := newEvent(LevelWriterAdapter{ioutil.Discard}, 0) e.Object(key, obj) c.l.context = enc.AppendObjectData(c.l.context, e.buf) putEvent(e) @@ -66,7 +66,7 @@ func (c Context) Object(key string, obj LogObjectMarshaler) Context { // EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface. func (c Context) EmbedObject(obj LogObjectMarshaler) Context { - e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e := newEvent(LevelWriterAdapter{ioutil.Discard}, 0) e.EmbedObject(obj) c.l.context = enc.AppendObjectData(c.l.context, e.buf) putEvent(e) @@ -379,6 +379,11 @@ func (c Context) Interface(key string, i interface{}) Context { return c } +// Any is a wrapper around Context.Interface. 
+func (c Context) Any(key string, i interface{}) Context { + return c.Interface(key, i) +} + type callerHook struct { callerSkipFrameCount int } diff --git a/vendor/github.com/rs/zerolog/log.go b/vendor/github.com/rs/zerolog/log.go index e7b5126e9..834c7e604 100644 --- a/vendor/github.com/rs/zerolog/log.go +++ b/vendor/github.com/rs/zerolog/log.go @@ -250,7 +250,7 @@ func New(w io.Writer) Logger { } lw, ok := w.(LevelWriter) if !ok { - lw = levelWriterAdapter{w} + lw = LevelWriterAdapter{w} } return Logger{w: lw, level: TraceLevel} } diff --git a/vendor/github.com/rs/zerolog/writer.go b/vendor/github.com/rs/zerolog/writer.go index 26f5e6325..9b9ef88e8 100644 --- a/vendor/github.com/rs/zerolog/writer.go +++ b/vendor/github.com/rs/zerolog/writer.go @@ -17,11 +17,13 @@ type LevelWriter interface { WriteLevel(level Level, p []byte) (n int, err error) } -type levelWriterAdapter struct { +// LevelWriterAdapter adapts an io.Writer to support the LevelWriter interface. +type LevelWriterAdapter struct { io.Writer } -func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { +// WriteLevel simply writes everything to the adapted writer, ignoring the level. +func (lw LevelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { return lw.Write(p) } @@ -38,7 +40,7 @@ func SyncWriter(w io.Writer) io.Writer { if lw, ok := w.(LevelWriter); ok { return &syncWriter{lw: lw} } - return &syncWriter{lw: levelWriterAdapter{w}} + return &syncWriter{lw: LevelWriterAdapter{w}} } // Write implements the io.Writer interface. 
@@ -96,7 +98,7 @@ func MultiLevelWriter(writers ...io.Writer) LevelWriter { if lw, ok := w.(LevelWriter); ok { lwriters = append(lwriters, lw) } else { - lwriters = append(lwriters, levelWriterAdapter{w}) + lwriters = append(lwriters, LevelWriterAdapter{w}) } } return multiLevelWriter{lwriters} @@ -152,3 +154,29 @@ func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) { w.Out = TestWriter{T: t, Frame: 6} } } + +// FilteredLevelWriter writes only logs at Level or above to Writer. +// +// It should be used only in combination with MultiLevelWriter when you +// want to write to multiple destinations at different levels. Otherwise +// you should just set the level on the logger and filter events early. +// When using MultiLevelWriter then you set the level on the logger to +// the lowest of the levels you use for writers. +type FilteredLevelWriter struct { + Writer LevelWriter + Level Level +} + +// Write writes to the underlying Writer. +func (w *FilteredLevelWriter) Write(p []byte) (int, error) { + return w.Writer.Write(p) +} + +// WriteLevel calls WriteLevel of the underlying Writer only if the level is equal +// or above the Level. 
+func (w *FilteredLevelWriter) WriteLevel(level Level, p []byte) (int, error) { + if level >= w.Level { + return w.Writer.WriteLevel(level, p) + } + return len(p), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go index 6d7007ff9..089f603c8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go @@ -1,5 +1,5 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !dragonfly && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!dragonfly,!plan9,!aix +//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !dragonfly && !plan9 && !aix +// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows,!dragonfly,!plan9,!aix package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go new file mode 100644 index 000000000..1f66be342 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go @@ -0,0 +1,119 @@ +//go:build netbsd +// +build netbsd + +package cpu + +import ( + "context" + "fmt" + "runtime" + "unsafe" + + "github.com/shirou/gopsutil/v3/internal/common" + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" +) + +const ( + // sys/sysctl.h + ctlKern = 1 // "high kernel": proc, limits + ctlHw = 6 // CTL_HW + kernCpTime = 51 // KERN_CPTIME +) + +var ClocksPerSec = float64(100) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) { + if !percpu { + mib := []int32{ctlKern, kernCpTime} + buf, _, err := common.CallSyscall(mib) + if 
err != nil { + return ret, err + } + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + stat := TimesStat{ + CPU: "cpu-total", + User: float64(times.User), + Nice: float64(times.Nice), + System: float64(times.Sys), + Idle: float64(times.Idle), + Irq: float64(times.Intr), + } + return []TimesStat{stat}, nil + } + + ncpu, err := unix.SysctlUint32("hw.ncpu") + if err != nil { + return + } + + var i uint32 + for i = 0; i < ncpu; i++ { + mib := []int32{ctlKern, kernCpTime, int32(i)} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + + stats := (*cpuTimes)(unsafe.Pointer(&buf[0])) + ret = append(ret, TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(stats.User), + Nice: float64(stats.Nice), + System: float64(stats.Sys), + Idle: float64(stats.Idle), + Irq: float64(stats.Intr), + }) + } + + return ret, nil +} + +// Returns only one (minimal) CPUInfoStat on NetBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + var err error + + c := InfoStat{} + + mhz, err := unix.Sysctl("machdep.dmi.processor-frequency") + if err != nil { + return nil, err + } + _, err = fmt.Sscanf(mhz, "%f", &c.Mhz) + if err != nil { + return nil, err + } + + ncpu, err := unix.SysctlUint32("hw.ncpuonline") + if err != nil { + return nil, err + } + c.Cores = int32(ncpu) + + if c.ModelName, err = unix.Sysctl("machdep.dmi.processor-version"); err != nil { + return nil, err + } + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go new file mode 100644 index 000000000..57e14528d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User 
uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go new file mode 100644 index 000000000..57e14528d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go b/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go index 4dc2bba58..67ae900bc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go @@ -1,5 +1,5 @@ -//go:build darwin || freebsd || openbsd -// +build darwin freebsd openbsd +//go:build darwin || freebsd || openbsd || netbsd +// +build darwin freebsd openbsd netbsd package host diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go index 1be2e8533..f045d4f17 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go @@ -8,7 +8,7 @@ import ( "context" "encoding/binary" "errors" - "io/ioutil" + "io" "os" "strings" "unsafe" @@ -59,7 +59,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return ret, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go b/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go index 585250f9a..a393ca15d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go @@ -1,5 +1,5 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows +//go:build 
!darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows +// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows package host diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go index 2c9aa9d0d..9a5382d39 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go @@ -7,7 +7,7 @@ import ( "bytes" "context" "encoding/binary" - "io/ioutil" + "io" "math" "os" "strings" @@ -54,7 +54,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return ret, err } @@ -111,7 +111,7 @@ func getUsersFromUtmp(utmpfile string) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return ret, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go index e6ac63a39..f9d7995e7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go @@ -8,7 +8,7 @@ import ( "context" "encoding/binary" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "regexp" @@ -91,7 +91,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return nil, err } @@ -411,13 +411,13 @@ func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, err } for _, file := range files { // Get the name of the temperature you are reading - name, err := ioutil.ReadFile(filepath.Join(file, "type")) + name, err := os.ReadFile(filepath.Join(file, "type")) if err != nil { warns.Add(err) continue } // Get the temperature reading - current, err := ioutil.ReadFile(filepath.Join(file, "temp")) 
+ current, err := os.ReadFile(filepath.Join(file, "temp")) if err != nil { warns.Add(err) continue @@ -461,13 +461,13 @@ func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, err // Get the label of the temperature you are reading label := "" - if raw, _ = ioutil.ReadFile(basepath + "_label"); len(raw) != 0 { + if raw, _ = os.ReadFile(basepath + "_label"); len(raw) != 0 { // Format the label from "Core 0" to "core_0" label = strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(raw))), " "), "_") } // Get the name of the temperature you are reading - if raw, err = ioutil.ReadFile(filepath.Join(directory, "name")); err != nil { + if raw, err = os.ReadFile(filepath.Join(directory, "name")); err != nil { warns.Add(err) continue } @@ -479,7 +479,7 @@ func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, err } // Get the temperature reading - if raw, err = ioutil.ReadFile(file); err != nil { + if raw, err = os.ReadFile(file); err != nil { warns.Add(err) continue } @@ -513,7 +513,7 @@ func optionalValueReadFromFile(filename string) float64 { return 0 } - if raw, err = ioutil.ReadFile(filename); err != nil { + if raw, err = os.ReadFile(filename); err != nil { return 0 } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go new file mode 100644 index 000000000..488f1dfc2 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go @@ -0,0 +1,55 @@ +//go:build netbsd +// +build netbsd + +package host + +import ( + "context" + "strings" + + "github.com/shirou/gopsutil/v3/internal/common" + "golang.org/x/sys/unix" +) + +func HostIDWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func numProcs(ctx context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + platform 
:= "" + family := "" + version := "" + + p, err := unix.Sysctl("kern.ostype") + if err == nil { + platform = strings.ToLower(p) + } + v, err := unix.Sysctl("kern.osrelease") + if err == nil { + version = strings.ToLower(v) + } + + return platform, family, version, nil +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + var ret []UserStat + return ret, common.ErrNotImplementedError +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + _, _, version, err := PlatformInformationWithContext(ctx) + return version, err +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go index 569de4abd..325015c23 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go @@ -7,7 +7,7 @@ import ( "bytes" "context" "encoding/binary" - "io/ioutil" + "io" "os" "strings" "unsafe" @@ -65,7 +65,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return ret, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go b/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go index 24529f19f..e7e0d837f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go @@ -1,5 +1,5 @@ -//go:build linux || freebsd || openbsd || darwin || solaris -// +build linux freebsd openbsd darwin solaris +//go:build linux || freebsd || openbsd || netbsd || darwin || solaris +// +build linux freebsd openbsd netbsd darwin solaris package host 
diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go b/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go index 7d3625acb..fef67f835 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go @@ -7,7 +7,6 @@ import ( "encoding/csv" "fmt" "io" - "io/ioutil" "os" "regexp" "strconv" @@ -60,7 +59,7 @@ func HostIDWithContext(ctx context.Context) (string, error) { // Count number of processes based on the number of entries in /proc func numProcs(ctx context.Context) (uint64, error) { - dirs, err := ioutil.ReadDir("/proc") + dirs, err := os.ReadDir("/proc") if err != nil { return 0, err } @@ -138,7 +137,7 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { // Find distribution name from /etc/release func parseReleaseFile() (string, error) { - b, err := ioutil.ReadFile("/etc/release") + b, err := os.ReadFile("/etc/release") if err != nil { return "", err } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go index 7a31d251b..99ed6a58e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go @@ -14,7 +14,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/url" "os" "os/exec" @@ -87,7 +86,7 @@ func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { fpath += "_" + i.Suffix } if PathExists(fpath) { - return ioutil.ReadFile(fpath) + return os.ReadFile(fpath) } return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) } @@ -100,7 +99,7 @@ var ErrNotImplementedError = errors.New("not implemented yet") // ReadFile reads contents from a file func ReadFile(filename string) (string, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return "", err } diff --git 
a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go new file mode 100644 index 000000000..efbc710a5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go @@ -0,0 +1,66 @@ +//go:build netbsd +// +build netbsd + +package common + +import ( + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrl(mib string) ([]string, error) { + cmd := exec.Command("sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem.go index ff960dacc..edaf268bb 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem.go @@ -50,6 +50,7 @@ type VirtualMemoryStat struct { // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html // https://www.kernel.org/doc/Documentation/filesystems/proc.txt // 
https://www.kernel.org/doc/Documentation/vm/overcommit-accounting + // https://www.kernel.org/doc/Documentation/vm/transhuge.txt Buffers uint64 `json:"buffers"` Cached uint64 `json:"cached"` WriteBack uint64 `json:"writeBack"` @@ -78,6 +79,7 @@ type VirtualMemoryStat struct { HugePagesRsvd uint64 `json:"hugePagesRsvd"` HugePagesSurp uint64 `json:"hugePagesSurp"` HugePageSize uint64 `json:"hugePageSize"` + AnonHugePages uint64 `json:"anonHugePages"` } type SwapMemoryStat struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go index ce930fbe4..ef867d742 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go @@ -1,5 +1,5 @@ -//go:build freebsd || openbsd -// +build freebsd openbsd +//go:build freebsd || openbsd || netbsd +// +build freebsd openbsd netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go index 0b6c528f2..697fd8709 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go @@ -1,5 +1,5 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix +//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix && !netbsd +// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix,!netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go index 935331728..214a91e47 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go @@ -311,6 +311,12 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu return ret, retEx, err } 
ret.HugePageSize = t * 1024 + case "AnonHugePages": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.AnonHugePages = t * 1024 } } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go new file mode 100644 index 000000000..d1f54ecaf --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go @@ -0,0 +1,87 @@ +//go:build netbsd +// +build netbsd + +package mem + +import ( + "context" + "errors" + "fmt" + + "golang.org/x/sys/unix" +) + +func GetPageSize() (uint64, error) { + return GetPageSizeWithContext(context.Background()) +} + +func GetPageSizeWithContext(ctx context.Context) (uint64, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") + if err != nil { + return 0, err + } + return uint64(uvmexp.Pagesize), nil +} + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") + if err != nil { + return nil, err + } + p := uint64(uvmexp.Pagesize) + + ret := &VirtualMemoryStat{ + Total: uint64(uvmexp.Npages) * p, + Free: uint64(uvmexp.Free) * p, + Active: uint64(uvmexp.Active) * p, + Inactive: uint64(uvmexp.Inactive) * p, + Cached: 0, // not available + Wired: uint64(uvmexp.Wired) * p, + } + + ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Used = ret.Total - ret.Available + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + // Get buffers from vm.bufmem sysctl + ret.Buffers, err = unix.SysctlUint64("vm.bufmem") + if err != nil { + return nil, err + } + + return ret, nil +} + +// Return swapctl summary info +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + out, err := 
invoke.CommandWithContext(ctx, "swapctl", "-sk") + if err != nil { + return &SwapMemoryStat{}, nil + } + + line := string(out) + var total, used, free uint64 + + _, err = fmt.Sscanf(line, + "total: %d 1K-blocks allocated, %d used, %d available", + &total, &used, &free) + if err != nil { + return nil, errors.New("failed to parse swapctl output") + } + + percent := float64(used) / float64(total) * 100 + return &SwapMemoryStat{ + Total: total * 1024, + Used: used * 1024, + Free: free * 1024, + UsedPercent: percent, + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go index de0ea7345..6e8ce67fb 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "strconv" @@ -643,7 +642,7 @@ func (p *process) getUids(ctx context.Context) ([]int32, error) { func (p *process) fillFromStatus(ctx context.Context) error { pid := p.Pid statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "status") - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return err } @@ -784,7 +783,7 @@ func processInetWithContext(ctx context.Context, file string, kind netConnection // This minimizes duplicates in the returned connections // For more info: // https://github.com/shirou/gopsutil/pull/361 - contents, err := ioutil.ReadFile(file) + contents, err := os.ReadFile(file) if err != nil { return nil, err } @@ -845,7 +844,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in // This minimizes duplicates in the returned connections // For more info: // https://github.com/shirou/gopsutil/pull/361 - contents, err := ioutil.ReadFile(file) + contents, err := os.ReadFile(file) if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go 
b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go index 37cb7ca44..f7989cd21 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -136,7 +135,7 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details pid := p.Pid statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "stat") - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return false, err } @@ -391,7 +390,7 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M smapsPath = smapsRollupPath } } - contents, err := ioutil.ReadFile(smapsPath) + contents, err := os.ReadFile(smapsPath) if err != nil { return nil, err } @@ -484,7 +483,7 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { environPath := common.HostProcWithContext(ctx, strconv.Itoa(int(p.Pid)), "environ") - environContent, err := ioutil.ReadFile(environPath) + environContent, err := os.ReadFile(environPath) if err != nil { return nil, err } @@ -668,7 +667,7 @@ func (p *Process) fillFromExeWithContext(ctx context.Context) (string, error) { func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) { pid := p.Pid cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") - cmdline, err := ioutil.ReadFile(cmdPath) + cmdline, err := os.ReadFile(cmdPath) if err != nil { return "", err } @@ -682,7 +681,7 @@ func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) { pid := p.Pid 
cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") - cmdline, err := ioutil.ReadFile(cmdPath) + cmdline, err := os.ReadFile(cmdPath) if err != nil { return nil, err } @@ -705,7 +704,7 @@ func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, error) { pid := p.Pid ioPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "io") - ioline, err := ioutil.ReadFile(ioPath) + ioline, err := os.ReadFile(ioPath) if err != nil { return nil, err } @@ -741,7 +740,7 @@ func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, e func (p *Process) fillFromStatmWithContext(ctx context.Context) (*MemoryInfoStat, *MemoryInfoExStat, error) { pid := p.Pid memPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "statm") - contents, err := ioutil.ReadFile(memPath) + contents, err := os.ReadFile(memPath) if err != nil { return nil, nil, err } @@ -802,7 +801,7 @@ func (p *Process) fillNameWithContext(ctx context.Context) error { func (p *Process) fillFromCommWithContext(ctx context.Context) error { pid := p.Pid statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "comm") - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return err } @@ -819,7 +818,7 @@ func (p *Process) fillFromStatus() error { func (p *Process) fillFromStatusWithContext(ctx context.Context) error { pid := p.Pid statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "status") - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return err } @@ -1026,7 +1025,7 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui statPath = common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "task", strconv.Itoa(int(tid)), "stat") } - contents, err := ioutil.ReadFile(statPath) + contents, err := 
os.ReadFile(statPath) if err != nil { return 0, 0, nil, 0, 0, 0, nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go index ad1c3cfc1..dd4bd4760 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go @@ -3,7 +3,6 @@ package process import ( "bytes" "context" - "io/ioutil" "os" "strconv" "strings" @@ -232,7 +231,7 @@ func (p *Process) fillFromPathAOutWithContext(ctx context.Context) (string, erro func (p *Process) fillFromExecnameWithContext(ctx context.Context) (string, error) { pid := p.Pid execNamePath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "execname") - exe, err := ioutil.ReadFile(execNamePath) + exe, err := os.ReadFile(execNamePath) if err != nil { return "", err } @@ -242,7 +241,7 @@ func (p *Process) fillFromExecnameWithContext(ctx context.Context) (string, erro func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) { pid := p.Pid cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") - cmdline, err := ioutil.ReadFile(cmdPath) + cmdline, err := os.ReadFile(cmdPath) if err != nil { return "", err } @@ -259,7 +258,7 @@ func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) { pid := p.Pid cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") - cmdline, err := ioutil.ReadFile(cmdPath) + cmdline, err := os.ReadFile(cmdPath) if err != nil { return nil, err } diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md index da2bad447..96b2e4dc3 100644 --- a/vendor/github.com/tidwall/gjson/README.md +++ b/vendor/github.com/tidwall/gjson/README.md @@ -427,16 +427,6 @@ if result.Index > 0 { This is a best-effort no allocation sub slice 
of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. -## Get multiple values at once - -The `GetMany` function can be used to get multiple values at the same time. - -```go -results := gjson.GetMany(json, "name.first", "name.last", "age") -``` - -The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths. - ## Performance Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go index a1633be52..79498250a 100644 --- a/vendor/github.com/tidwall/gjson/gjson.go +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -3410,7 +3410,7 @@ func (t Result) Path(json string) string { if !rcomp.Exists() { goto fail } - comp := escapeComp(rcomp.String()) + comp := Escape(rcomp.String()) path = append(path, '.') path = append(path, comp...) } @@ -3425,17 +3425,31 @@ fail: // isSafePathKeyChar returns true if the input character is safe for not // needing escaping. func isSafePathKeyChar(c byte) bool { - return c <= ' ' || c > '~' || c == '_' || c == '-' || c == ':' || - (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9') + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || c <= ' ' || c > '~' || c == '_' || + c == '-' || c == ':' } -// escapeComp escaped a path compontent, making it safe for generating a -// path for later use. -func escapeComp(comp string) string { +// Escape returns an escaped path component. 
+// +// json := `{ +// "user":{ +// "first.name": "Janet", +// "last.name": "Prichard" +// } +// }` +// user := gjson.Get(json, "user") +// println(user.Get(gjson.Escape("first.name")) +// println(user.Get(gjson.Escape("last.name")) +// // Output: +// // Janet +// // Prichard +func Escape(comp string) string { for i := 0; i < len(comp); i++ { if !isSafePathKeyChar(comp[i]) { - ncomp := []byte(comp[:i]) + ncomp := make([]byte, len(comp)+1) + copy(ncomp, comp[:i]) + ncomp = ncomp[:i] for ; i < len(comp); i++ { if !isSafePathKeyChar(comp[i]) { ncomp = append(ncomp, '\\') diff --git a/vendor/modules.txt b/vendor/modules.txt index fc257ae1d..744c4b1ca 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,13 +58,13 @@ github.com/power-devops/perfstat github.com/rivo/uniseg # github.com/rogpeppe/go-internal v1.11.0 ## explicit; go 1.19 -# github.com/rs/zerolog v1.30.0 +# github.com/rs/zerolog v1.31.0 ## explicit; go 1.15 github.com/rs/zerolog github.com/rs/zerolog/internal/cbor github.com/rs/zerolog/internal/json github.com/rs/zerolog/log -# github.com/shirou/gopsutil/v3 v3.23.8 +# github.com/shirou/gopsutil/v3 v3.23.9 ## explicit; go 1.15 github.com/shirou/gopsutil/v3/common github.com/shirou/gopsutil/v3/cpu @@ -82,7 +82,7 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/tidwall/gjson v1.16.0 +# github.com/tidwall/gjson v1.17.0 ## explicit; go 1.12 github.com/tidwall/gjson # github.com/tidwall/match v1.1.1