From 6551cef35a7b873adedec1c20c39228ba0d0dbb9 Mon Sep 17 00:00:00 2001 From: Chris Grindstaff Date: Fri, 6 Oct 2023 13:22:22 -0400 Subject: [PATCH 1/2] doc: `generate metrics` should include metrics Created by builtin plugins: Aggregator, Max, and MetricAgent --- cmd/poller/plugin/aggregator/aggregator.go | 10 + cmd/poller/plugin/max/max.go | 12 +- cmd/poller/plugin/metricagent/metric_agent.go | 15 +- cmd/poller/plugin/plugin.go | 6 + cmd/tools/generate/counter.go | 193 +- cmd/tools/rest/rest.go | 4 + cmd/tools/template/template.go | 399 ++++ cmd/tools/template/template_test.go | 336 +--- docs/ontap-metrics.md | 1736 ++++++++++++++++- 9 files changed, 2264 insertions(+), 447 deletions(-) create mode 100644 cmd/tools/template/template.go diff --git a/cmd/poller/plugin/aggregator/aggregator.go b/cmd/poller/plugin/aggregator/aggregator.go index d8b0c105c..405051028 100644 --- a/cmd/poller/plugin/aggregator/aggregator.go +++ b/cmd/poller/plugin/aggregator/aggregator.go @@ -306,3 +306,13 @@ func (a *Aggregator) NewLabels() []string { return newLabelNames } + +// NewMetrics returns the new metrics the receiver creates +func (a *Aggregator) NewMetrics() []plugin.DerivedMetric { + var derivedMetrics []plugin.DerivedMetric + for _, r := range a.rules { + derivedMetrics = append(derivedMetrics, plugin.DerivedMetric{Name: r.label, Source: r.object}) + } + + return derivedMetrics +} diff --git a/cmd/poller/plugin/max/max.go b/cmd/poller/plugin/max/max.go index 37bc27c99..3730ea766 100644 --- a/cmd/poller/plugin/max/max.go +++ b/cmd/poller/plugin/max/max.go @@ -20,7 +20,7 @@ type Max struct { rules []*rule } -func New(p *plugin.AbstractPlugin) plugin.Plugin { +func New(p *plugin.AbstractPlugin) *Max { return &Max{AbstractPlugin: p} } @@ -239,3 +239,13 @@ func (m *Max) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) { return matricesArray, nil } + +// NewMetrics returns the new metrics the receiver creates +func (m *Max) NewMetrics() []plugin.DerivedMetric { + var derivedMetrics []plugin.DerivedMetric + for _, r := range m.rules { + derivedMetrics = append(derivedMetrics, plugin.DerivedMetric{Name: r.object, Source: r.label, IsMax: true}) + } + + return derivedMetrics +} diff --git a/cmd/poller/plugin/metricagent/metric_agent.go b/cmd/poller/plugin/metricagent/metric_agent.go index 8fa2c7060..0a377eeae 100644 --- a/cmd/poller/plugin/metricagent/metric_agent.go +++ b/cmd/poller/plugin/metricagent/metric_agent.go @@ -9,6 +9,7 @@ import ( "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/matrix" "strconv" + "strings" ) type MetricAgent struct { @@ -17,7 +18,7 @@ type MetricAgent struct { computeMetricRules []computeMetricRule } -func New(p *plugin.AbstractPlugin) plugin.Plugin { +func New(p *plugin.AbstractPlugin) *MetricAgent { return &MetricAgent{AbstractPlugin: p} } @@ -144,3 +145,15 @@ func (a *MetricAgent) getMetric(m *matrix.Matrix, name string) *matrix.Metric { } return m.GetMetric(name) } + +// NewMetrics returns the new metrics the receiver creates +func (a *MetricAgent) NewMetrics() []plugin.DerivedMetric { + var derivedMetrics []plugin.DerivedMetric + for _, rule := range a.computeMetricRules { + derivedMetrics = append(derivedMetrics, plugin.DerivedMetric{ + Name: rule.metric, + Source: strings.Join(rule.metricNames, ", "), + }) + } + return derivedMetrics +} diff --git a/cmd/poller/plugin/plugin.go b/cmd/poller/plugin/plugin.go index 43d14a0a7..15bc6bc70 100644 --- a/cmd/poller/plugin/plugin.go +++ b/cmd/poller/plugin/plugin.go @@ -173,3 +173,9 @@ func 
GetInterval(param *node.Node, defaultInterval time.Duration) float64 { } return defaultInterval.Seconds() } + +type DerivedMetric struct { + Name string + Source string + IsMax bool +} diff --git a/cmd/tools/generate/counter.go b/cmd/tools/generate/counter.go index 45d5a478a..05092f184 100644 --- a/cmd/tools/generate/counter.go +++ b/cmd/tools/generate/counter.go @@ -2,7 +2,9 @@ package generate import ( "fmt" + "github.com/netapp/harvest/v2/cmd/poller/plugin" "github.com/netapp/harvest/v2/cmd/tools/rest" + template2 "github.com/netapp/harvest/v2/cmd/tools/template" "github.com/netapp/harvest/v2/pkg/api/ontapi/zapi" "github.com/netapp/harvest/v2/pkg/tree" "github.com/netapp/harvest/v2/pkg/tree/node" @@ -250,15 +252,18 @@ func processRestConfigCounters(path string) map[string]Counter { ) t, err := tree.ImportYaml(path) if t == nil || err != nil { - fmt.Printf("Unable to import template file %s. File is invalid or empty\n", path) + fmt.Printf("Unable to import template file %s. File is invalid or empty err=%s\n", path, err) return nil } - query := t.GetChildContentS("query") - object := t.GetChildContentS("object") + model, err := template2.ReadTemplate(path) + if err != nil { + fmt.Printf("Unable to import template file %s. File is invalid or empty err=%s\n", path, err) + return nil + } + noExtraMetrics := len(model.MultiplierMetrics) == 0 && len(model.PluginMetrics) == 0 templateCounters := t.GetChildS("counters") - exportData := t.GetChildContentS("export_data") - if exportData == "false" { + if model.ExportData == "false" && noExtraMetrics { return nil } @@ -272,8 +277,8 @@ func processRestConfigCounters(path string) map[string]Counter { if _, ok := excludeCounters[name]; ok { continue } - description := searchDescriptionSwagger(object, name) - harvestName := strings.Join([]string{object, display}, "_") + description := searchDescriptionSwagger(model.Object, name) + harvestName := strings.Join([]string{model.Object, display}, "_") if m == "float" { co := Counter{ Name: harvestName, @@ -281,16 +286,40 @@ func processRestConfigCounters(path string) map[string]Counter { APIs: []MetricDef{ { API: "REST", - Endpoint: query, + Endpoint: model.Query, Template: path, ONTAPCounter: name, }, }, } counters[harvestName] = co + + // If the template has any MultiplierMetrics, add them + for _, metric := range model.MultiplierMetrics { + mc := co + addAggregatedCounter(&mc, metric, harvestName, display) + counters[mc.Name] = mc + } } } } + + // If the template has any PluginMetrics, add them + for _, metric := range model.PluginMetrics { + co := Counter{ + Name: model.Object + "_" + metric.Name, + APIs: []MetricDef{ + { + API: "REST", + Endpoint: model.Query, + Template: path, + ONTAPCounter: metric.Source, + }, + }, + } + counters[co.Name] = co + } + return counters } @@ -309,14 +338,17 @@ func processZAPIPerfCounters(path string, client *zapi.Client) map[string]Counte fmt.Printf("Unable to import template file %s. File is invalid or empty\n", path) return nil } + model, err := template2.ReadTemplate(path) + if err != nil { + fmt.Printf("Unable to import template file %s. 
File is invalid or empty err=%s\n", path, err) + return nil + } - query := t.GetChildContentS("query") - object := t.GetChildContentS("object") + noExtraMetrics := len(model.MultiplierMetrics) == 0 && len(model.PluginMetrics) == 0 templateCounters := t.GetChildS("counters") override := t.GetChildS("override") - exportData := t.GetChildContentS("export_data") - if exportData == "false" { + if model.ExportData == "false" && noExtraMetrics { return nil } @@ -326,7 +358,7 @@ func processZAPIPerfCounters(path string, client *zapi.Client) map[string]Counte // build request request = node.NewXMLS("perf-object-counter-list-info") - request.NewChildS("objectname", query) + request.NewChildS("objectname", model.Query) if err = client.BuildRequest(request); err != nil { fmt.Printf("error while building request %+v\n", err) @@ -364,11 +396,11 @@ func processZAPIPerfCounters(path string, client *zapi.Client) map[string]Counte for _, c := range templateCounters.GetAllChildContentS() { if c != "" { name, display, m, _ := util.ParseMetric(c) - if strings.HasPrefix(display, object) { - display = strings.TrimPrefix(display, object) + if strings.HasPrefix(display, model.Object) { + display = strings.TrimPrefix(display, model.Object) display = strings.TrimPrefix(display, "_") } - harvestName := strings.Join([]string{object, display}, "_") + harvestName := strings.Join([]string{model.Object, display}, "_") if m == "float" { if _, ok := excludeCounters[name]; ok { continue @@ -380,7 +412,7 @@ func processZAPIPerfCounters(path string, client *zapi.Client) map[string]Counte APIs: []MetricDef{ { API: "ZAPI", - Endpoint: strings.Join([]string{"perf-object-get-instances", query}, " "), + Endpoint: strings.Join([]string{"perf-object-get-instances", model.Query}, " "), Template: path, ONTAPCounter: name, Unit: zapiUnitMap[name], @@ -390,10 +422,33 @@ func processZAPIPerfCounters(path string, client *zapi.Client) map[string]Counte }, } counters[harvestName] = co + + // If the template has any MultiplierMetrics, add them + for _, metric := range model.MultiplierMetrics { + mc := co + addAggregatedCounter(&mc, metric, harvestName, name) + counters[mc.Name] = mc + } } } } } + + // If the template has any PluginMetrics, add them + for _, metric := range model.PluginMetrics { + co := Counter{ + Name: model.Object + "_" + metric.Name, + APIs: []MetricDef{ + { + API: "ZAPI", + Endpoint: model.Query, + Template: path, + ONTAPCounter: metric.Source, + }, + }, + } + counters[co.Name] = co + } return counters } @@ -406,12 +461,14 @@ func processZapiConfigCounters(path string) map[string]Counter { fmt.Printf("Unable to import template file %s. File is invalid or empty\n", path) return nil } - - query := t.GetChildContentS("query") - object := t.GetChildContentS("object") + model, err := template2.ReadTemplate(path) + if err != nil { + fmt.Printf("Unable to import template file %s. 
File is invalid or empty err=%s\n", path, err) + return nil + } + noExtraMetrics := len(model.MultiplierMetrics) == 0 && len(model.PluginMetrics) == 0 templateCounters := t.GetChildS("counters") - exportData := t.GetChildContentS("export_data") - if exportData == "false" { + if model.ExportData == "false" && noExtraMetrics { return nil } if templateCounters == nil { @@ -421,7 +478,7 @@ func processZapiConfigCounters(path string) map[string]Counter { zc := make(map[string]string) for _, c := range templateCounters.GetChildren() { - parseZapiCounters(c, []string{}, object, zc) + parseZapiCounters(c, []string{}, model.Object, zc) } for k, v := range zc { @@ -433,13 +490,36 @@ func processZapiConfigCounters(path string) map[string]Counter { APIs: []MetricDef{ { API: "ZAPI", - Endpoint: query, + Endpoint: model.Query, Template: path, ONTAPCounter: v, }, }, } counters[k] = co + + // If the template has any MultiplierMetrics, add them + for _, metric := range model.MultiplierMetrics { + mc := co + addAggregatedCounter(&mc, metric, co.Name, model.Object) + counters[mc.Name] = mc + } + } + + // If the template has any PluginMetrics, add them + for _, metric := range model.PluginMetrics { + co := Counter{ + Name: model.Object + "_" + metric.Name, + APIs: []MetricDef{ + { + API: "ZAPI", + Endpoint: model.Query, + Template: path, + ONTAPCounter: metric.Source, + }, + }, + } + counters[co.Name] = co } return counters } @@ -454,7 +534,9 @@ func visitRestTemplates(dir string, client *rest.Client, eachTemp func(path stri if ext != ".yaml" { return nil } - + if strings.HasSuffix(path, "default.yaml") { + return nil + } r := eachTemp(path, client) for k, v := range r { result[k] = v @@ -534,7 +616,7 @@ func generateCounterTemplate(counters map[string]Counter, client *rest.Client) { break } } - //missing Rest Mapping + // missing Rest Mapping if isPrint { fmt.Printf("Missing %s mapping for %v \n", "REST", counter) } @@ -601,28 +683,32 @@ func processRestPerfCounters(path string, client *rest.Client) map[string]Counte fmt.Printf("Unable to import template file %s. File is invalid or empty\n", path) return nil } - - query := t.GetChildContentS("query") - object := t.GetChildContentS("object") + model, err := template2.ReadTemplate(path) + if err != nil { + fmt.Printf("Unable to import template file %s. 
File is invalid or empty err=%s\n", path, err) + return nil + } + noExtraMetrics := len(model.MultiplierMetrics) == 0 && len(model.PluginMetrics) == 0 templateCounters := t.GetChildS("counters") override := t.GetChildS("override") - exportData := t.GetChildContentS("export_data") - if exportData == "false" { + if model.ExportData == "false" && noExtraMetrics { return nil } if templateCounters == nil { return nil } counterMap := make(map[string]string) + counterMapNoPrefix := make(map[string]string) for _, c := range templateCounters.GetAllChildContentS() { if c != "" { name, display, m, _ := util.ParseMetric(c) if m == "float" { - counterMap[name] = strings.Join([]string{object, display}, "_") + counterMap[name] = strings.Join([]string{model.Object, display}, "_") + counterMapNoPrefix[name] = display } } } - href := rest.BuildHref(query, "", nil, "", "", "", "", query) + href := rest.BuildHref(model.Query, "", nil, "", "", "", "", model.Query) records, err = rest.Fetch(client, href) if err != nil { fmt.Printf("error while invoking api %+v\n", err) @@ -659,7 +745,7 @@ func processRestPerfCounters(path string, client *rest.Client) map[string]Counte APIs: []MetricDef{ { API: "REST", - Endpoint: query, + Endpoint: model.Query, Template: path, ONTAPCounter: ontapCounterName, Unit: r.Get("unit").String(), @@ -669,12 +755,51 @@ func processRestPerfCounters(path string, client *rest.Client) map[string]Counte }, } counters[c.Name] = c + + // If the template has any MultiplierMetrics, add them + for _, metric := range model.MultiplierMetrics { + mc := c + addAggregatedCounter(&mc, metric, v, counterMapNoPrefix[ontapCounterName]) + counters[mc.Name] = mc + } } return true }) + + // If the template has any PluginMetrics, add them + for _, metric := range model.PluginMetrics { + co := Counter{ + Name: model.Object + "_" + metric.Name, + APIs: []MetricDef{ + { + API: "REST", + Endpoint: model.Query, + Template: path, + ONTAPCounter: metric.Source, + }, + }, + } + counters[co.Name] = co + } + return counters } +func addAggregatedCounter(c *Counter, metric plugin.DerivedMetric, withPrefix string, noPrefix string) { + if !strings.HasSuffix(c.Description, ".") { + c.Description = c.Description + "." 
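+ // the Sprintf calls below append a full sentence, so make sure the base description ends with a period first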
+ } + if metric.IsMax { + c.Name = metric.Name + "_" + noPrefix + c.Description = fmt.Sprintf("%s %s is the maximum of [%s](#%s) for label `%s`.", + c.Description, c.Name, withPrefix, withPrefix, metric.Source) + } else { + c.Name = metric.Name + "_" + c.Name + c.Description = fmt.Sprintf("%s %s is [%s](#%s) aggregated by `%s`.", + c.Description, c.Name, withPrefix, withPrefix, metric.Name) + } +} + func processExternalCounters(counters map[string]Counter) map[string]Counter { dat, err := os.ReadFile("cmd/tools/generate/counter.yaml") if err != nil { diff --git a/cmd/tools/rest/rest.go b/cmd/tools/rest/rest.go index a42a402dc..923a67351 100644 --- a/cmd/tools/rest/rest.go +++ b/cmd/tools/rest/rest.go @@ -99,6 +99,10 @@ func ReadOrDownloadSwagger(pName string) (string, error) { bytesDownloaded, err := downloadSwagger(poller, swaggerPath, swaggerURL, args.Verbose) if err != nil { fmt.Printf("error downloading swagger %s\n", err) + if bytesDownloaded == 0 { + // if the tmp file exists, remove it since it is empty + _ = os.Remove(swaggerPath) + } return "", err } fmt.Printf("downloaded %d bytes from %s\n", bytesDownloaded, swaggerURL) diff --git a/cmd/tools/template/template.go b/cmd/tools/template/template.go new file mode 100644 index 000000000..d33875ba8 --- /dev/null +++ b/cmd/tools/template/template.go @@ -0,0 +1,399 @@ +package template + +import ( + "bufio" + "errors" + "fmt" + "github.com/netapp/harvest/v2/cmd/poller/plugin" + "github.com/netapp/harvest/v2/cmd/poller/plugin/aggregator" + "github.com/netapp/harvest/v2/cmd/poller/plugin/labelagent" + max2 "github.com/netapp/harvest/v2/cmd/poller/plugin/max" + "github.com/netapp/harvest/v2/cmd/poller/plugin/metricagent" + "github.com/netapp/harvest/v2/pkg/errs" + "github.com/netapp/harvest/v2/pkg/tree" + "github.com/netapp/harvest/v2/pkg/tree/node" + y3 "gopkg.in/yaml.v3" + "os" + "regexp" + "strings" +) + +type Model struct { + Name string `yaml:"name"` + Query string `yaml:"query"` + Object string `yaml:"object"` + ExportData string `yaml:"export_data"` + Endpoints []*Endpoint `yaml:"endpoints"` + ExportOptions struct { + InstanceKeys []string `yaml:"instance_keys"` + InstanceLabels []string `yaml:"instance_labels"` + IncludeAllLabels bool `yaml:"include_all_labels"` + } `yaml:"export_options"` + metrics []Metric + pluginLabels []string + PluginMetrics []plugin.DerivedMetric + MultiplierMetrics []plugin.DerivedMetric +} + +type Metric struct { + left string + right string + line string + renameColumn int + hasSigil bool + column int + parents []string +} + +type Endpoint struct { + Query string `yaml:"query"` + Counters []string `yaml:"counters"` + Metrics []Metric +} + +func ReadTemplate(path string) (Model, error) { + var model Model + data, err := os.ReadFile(path) + if err != nil { + return Model{}, err + } + model, err = unmarshalModel(data) + if err != nil { + return Model{}, err + } + err = readPlugins(path, &model) + if err != nil { + return Model{}, err + } + return model, nil +} + +func unmarshalModel(data []byte) (Model, error) { + tm := Model{} + root := &y3.Node{} + err := y3.Unmarshal(data, root) + if err != nil { + return tm, fmt.Errorf("failed to unmarshal err: %w", err) + } + if len(root.Content) == 0 { + return tm, errs.New(errs.ErrConfig, "template file is empty or does not exist") + } + contentNode := root.Content[0] + err = readNameQueryObject(&tm, contentNode) + if err != nil { + return tm, err + } + countersNode := searchNode(contentNode, "counters") + if countersNode == nil { + return tm, fmt.Errorf("template has 
no counters") + } + metrics := make([]Metric, 0) + flattenCounters(countersNode, &metrics, make([]string, 0)) + addEndpoints(&tm, searchNode(contentNode, "endpoints"), make([]string, 0)) + addExportOptions(&tm, searchNode(contentNode, "export_options")) + + tm.metrics = metrics + return tm, nil +} + +func readPlugins(path string, model *Model) error { + template, err := tree.ImportYaml(path) + if err != nil { + return fmt.Errorf("failed to ImportYaml err: %w", err) + } + err = findBuiltInPlugins(template, model) + if err != nil { + return fmt.Errorf("failed to find findBuiltInPlugins err: %w", err) + } + err = findCustomPlugins(path, template, model) + if err != nil { + return fmt.Errorf("failed to findCustomPlugins err: %w", err) + } + return nil +} + +func readNameQueryObject(tm *Model, root *y3.Node) error { + nameNode := searchNode(root, "name") + if nameNode != nil { + tm.Name = nameNode.Value + } + queryNode := searchNode(root, "query") + if queryNode != nil { + tm.Query = queryNode.Value + } + objectNode := searchNode(root, "object") + if objectNode != nil { + tm.Object = objectNode.Value + } + if tm.Name == "" { + return fmt.Errorf("template has no name") + } + if tm.Query == "" { + return fmt.Errorf("template has no query") + } + if tm.Object == "" { + return fmt.Errorf("template has no object") + } + return nil +} + +func addEndpoints(tm *Model, n *y3.Node, parents []string) { + if n == nil { + return + } + for _, m := range n.Content { + query := m.Content[1].Value + metrics := make([]Metric, 0) + countersNode := m.Content[3] + flattenCounters(countersNode, &metrics, parents) + ep := &Endpoint{Query: query, Metrics: metrics} + tm.Endpoints = append(tm.Endpoints, ep) + } +} + +func searchNode(r *y3.Node, key string) *y3.Node { + for i, n := range r.Content { + if n.Tag == "!!str" && n.Value == key { + return r.Content[i+1] + } + } + return nil +} + +func addExportOptions(tm *Model, n *y3.Node) { + if n == nil { + return + } + instanceKeys := searchNode(n, "instance_keys") + if instanceKeys != nil { + for _, ikn := range instanceKeys.Content { + tm.ExportOptions.InstanceKeys = append(tm.ExportOptions.InstanceKeys, ikn.Value) + } + } + instanceLabels := searchNode(n, "instance_labels") + if instanceLabels != nil { + for _, il := range instanceLabels.Content { + tm.ExportOptions.InstanceLabels = append(tm.ExportOptions.InstanceLabels, il.Value) + } + } +} + +func flattenCounters(n *y3.Node, metrics *[]Metric, parents []string) { + switch n.Tag { + case "!!map": + key := n.Content[0].Value + if key == "hidden_fields" || key == "filter" { + return + } + parents = append(parents, key) + flattenCounters(n.Content[1], metrics, parents) + case "!!seq": + for _, c := range n.Content { + flattenCounters(c, metrics, parents) + } + case "!!str": + *metrics = append(*metrics, newZapiMetric(n, parents)) + } +} + +var sigilReplacer = strings.NewReplacer("^", "", "- ", "") + +func newZapiMetric(n *y3.Node, parents []string) Metric { + // separate left and right and remove all sigils + text := n.Value + noSigils := sigilReplacer.Replace(text) + before, after, found := strings.Cut(noSigils, "=>") + m := Metric{ + line: text, + left: strings.TrimSpace(noSigils), + hasSigil: strings.Contains(text, "^"), + column: n.Column, + parents: parents, + } + if found { + m.left = strings.TrimSpace(before) + m.right = trimComment(after) + m.renameColumn = strings.Index(text, "=>") + n.Column + } + return m +} + +func trimComment(text string) string { + lastSink := strings.Index(text, "#") + if lastSink > -1 { + 
return strings.TrimSpace(text[:lastSink]) + } + return strings.TrimSpace(text) +} + +func findBuiltInPlugins(template *node.Node, model *Model) error { + var ee []error + template.PreprocessTemplate() + + err := readLabelAgent(template, model) + if err != nil { + ee = append(ee, err) + } + + err = readAggregator(template, model) + if err != nil { + ee = append(ee, err) + } + + err = readMetricAgent(template, model) + if err != nil { + ee = append(ee, err) + } + + err = readMax(template, model) + if err != nil { + ee = append(ee, err) + } + + return errors.Join(ee...) +} + +func readMax(template *node.Node, model *Model) error { + children := template.SearchChildren([]string{"plugins", "Max"}) + if len(children) != 0 { + abc := plugin.AbstractPlugin{Params: children[0]} + mm := max2.New(&abc) + err := mm.Init() + if err != nil { + return err + } + model.MultiplierMetrics = append(model.MultiplierMetrics, mm.NewMetrics()...) + } + + return nil +} + +func readMetricAgent(template *node.Node, model *Model) error { + children := template.SearchChildren([]string{"plugins", "MetricAgent"}) + if len(children) == 0 { + return nil + } + abc := plugin.AbstractPlugin{Params: children[0]} + ma := metricagent.New(&abc) + err := ma.Init() + if err != nil { + return err + } + model.PluginMetrics = append(model.PluginMetrics, ma.NewMetrics()...) + return nil +} + +func readAggregator(template *node.Node, model *Model) error { + children := template.SearchChildren([]string{"plugins", "Aggregator"}) + if len(children) != 0 { + abc := plugin.AbstractPlugin{Params: children[0]} + agg := aggregator.New(&abc) + err := agg.Init() + if err != nil { + return err + } + model.pluginLabels = append(model.pluginLabels, agg.NewLabels()...) + model.MultiplierMetrics = append(model.MultiplierMetrics, agg.NewMetrics()...) 
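+ // these MultiplierMetrics are consumed by `generate metrics`, which names them <label>_<metric>, e.g. disk_busy aggregated by aggr becomes aggr_disk_busy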
+ } + + return nil +} + +func readLabelAgent(template *node.Node, model *Model) error { + children := template.SearchChildren([]string{"plugins", "LabelAgent"}) + if len(children) == 0 { + return nil + } + abc := plugin.AbstractPlugin{Params: children[0]} + la := labelagent.New(&abc) + err := la.Init() + if err != nil { + return err + } + model.pluginLabels = la.NewLabels() + return nil +} + +var setRe = regexp.MustCompile(`[sS]etLabel\("?(\w+)"?,`) + +func findCustomPlugins(path string, template *node.Node, model *Model) error { + plug := template.SearchChildren([]string{"plugins"}) + if len(plug) == 0 { + return nil + } + builtIn := map[string]bool{ + "LabelAgent": true, + "MetricAgent": true, + "Aggregator": true, + "Max": true, + "Tenant": true, + } + for _, child := range plug[0].Children { + name := child.GetNameS() + if name == "" { + name = child.GetContentS() + } + if builtIn[name] { + continue + } + + goPluginName := strings.ToLower(name) + pluginGo := toPluginPath(path, goPluginName) + + err2 := readPlugin(pluginGo, model) + if err2 != nil { + return err2 + } + + // special case for labels added outside normal per-object plugin + if strings.Contains(path, "snapmirror.yaml") || strings.Contains(path, "svm.yaml") { + pluginGo2 := toPluginPath(path, "commonutils") + err2 = readPlugin(pluginGo2, model) + if err2 != nil { + return err2 + } + } + } + return nil +} + +func toPluginPath(path string, pluginName string) string { + // ../../../conf/rest/9.10.0/sensor.yaml -> ../../../cmd/collectors/rest/plugins/sensor/sensor.go + // conf/rest/9.10.0/sensor.yaml -> cmd/collectors/rest/plugins/sensor/sensor.go + + before, after, _ := strings.Cut(path, "conf/") + + // Both Zapi and REST sensor.yaml templates use a single plugin defined in power.go + if strings.Contains(path, "sensor.yaml") { + return fmt.Sprintf("%scmd/collectors/power.go", before) + } + + base := strings.Split(after, "/") + p := fmt.Sprintf("%scmd/collectors/%s/plugins/%s/%s.go", before, base[0], pluginName, pluginName) + + // special case for labels added outside normal per-object plugin + if pluginName == "commonutils" { + return fmt.Sprintf("%scmd/collectors/commonutils.go", before) + } + + return p +} + +func readPlugin(fileName string, model *Model) error { + file, err := os.Open(fileName) + if err != nil { + return err + } + scanner := bufio.NewScanner(file) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + text := scanner.Text() + trimmed := strings.TrimSpace(text) + matches := setRe.FindStringSubmatch(trimmed) + if len(matches) == 2 { + model.pluginLabels = append(model.pluginLabels, matches[1]) + } + } + _ = file.Close() + return nil +} diff --git a/cmd/tools/template/template_test.go b/cmd/tools/template/template_test.go index 3fae46e51..c6d6d63b8 100644 --- a/cmd/tools/template/template_test.go +++ b/cmd/tools/template/template_test.go @@ -1,15 +1,8 @@ package template import ( - "bufio" "bytes" "fmt" - "github.com/netapp/harvest/v2/cmd/poller/plugin" - "github.com/netapp/harvest/v2/cmd/poller/plugin/aggregator" - "github.com/netapp/harvest/v2/cmd/poller/plugin/labelagent" - "github.com/netapp/harvest/v2/pkg/errs" - "github.com/netapp/harvest/v2/pkg/tree" - "github.com/netapp/harvest/v2/pkg/tree/node" "github.com/netapp/harvest/v2/pkg/util" y3 "gopkg.in/yaml.v3" "io/fs" @@ -73,7 +66,7 @@ func TestTemplateNamesMatchDefault(t *testing.T) { } - visitTemplates(t, func(path string, model TemplateModel) { + visitTemplates(t, func(path string, model Model) { sp := collectorPath(path) o := modelsByTemplate[sp] if 
o == nil { @@ -140,12 +133,12 @@ func TestTotals(t *testing.T) { totalObject := make(objectMap) var totalCounters int - visitTemplates(t, func(path string, model TemplateModel) { + visitTemplates(t, func(path string, model Model) { totalObject[model.Name] = path totalCounters += len(model.metrics) for _, ep := range model.Endpoints { - totalCounters += len(ep.metrics) + totalCounters += len(ep.Metrics) } }, allTemplatesButEms...) @@ -194,7 +187,7 @@ func newObjectMap(n *y3.Node) objectMap { } func TestMetricColumnAlignmentAndCase(t *testing.T) { - visitTemplates(t, func(path string, model TemplateModel) { + visitTemplates(t, func(path string, model Model) { columnSet := make(map[int]int) for _, m := range model.metrics { if m.renameColumn > 0 { @@ -213,7 +206,7 @@ func TestMetricColumnAlignmentAndCase(t *testing.T) { } for _, ep := range model.Endpoints { - for _, m := range ep.metrics { + for _, m := range ep.Metrics { if m.renameColumn > 0 { columnSet[m.renameColumn]++ } @@ -230,7 +223,7 @@ func TestMetricColumnAlignmentAndCase(t *testing.T) { } func TestNoTabs(t *testing.T) { - visitTemplates(t, func(path string, model TemplateModel) { + visitTemplates(t, func(path string, model Model) { data, err := os.ReadFile(path) if err != nil { t.Errorf("failed to read path=%s err=%v", shortPath(path), err) @@ -257,7 +250,7 @@ func TestExportLabelsExist(t *testing.T) { "zapi/cdot/9.8.0/qos_policy_fixed.yaml", } - visitTemplates(t, func(path string, model TemplateModel) { + visitTemplates(t, func(path string, model Model) { shortenedPath := shortPath(path) isZapi := strings.Contains(path, "zapi") isZapiPerf := strings.Contains(path, "zapiperf") @@ -289,7 +282,7 @@ func TestExportLabelsExist(t *testing.T) { } } for _, ep := range model.Endpoints { - for _, m := range ep.metrics { + for _, m := range ep.Metrics { if m.right != "" { allLabelNames[m.right] = true } else { @@ -318,17 +311,7 @@ type sorted struct { want string } -type metric struct { - left string - right string - line string - renameColumn int - hasSigil bool - column int - parents []string -} - -func (m metric) pathString() string { +func (m Metric) pathString() string { return strings.Join(m.parents, "/") + "/" + m.left } @@ -339,7 +322,7 @@ func (m metric) pathString() string { // ZAPI parent attributes are sorted alphabetically // Tests that exported keys and labels are in sorted order func TestMetricsAreSortedAndNoDuplicates(t *testing.T) { - visitTemplates(t, func(path string, model TemplateModel) { + visitTemplates(t, func(path string, model Model) { sortedCounters := checkSortedCounters(model.metrics) if sortedCounters.got != sortedCounters.want { t.Errorf("counters should be sorted path=[%s]", shortPath(path)) @@ -348,7 +331,7 @@ func TestMetricsAreSortedAndNoDuplicates(t *testing.T) { } for _, endpoint := range model.Endpoints { - sortedCounters := checkSortedCounters(endpoint.metrics) + sortedCounters := checkSortedCounters(endpoint.Metrics) if sortedCounters.got != sortedCounters.want { t.Errorf("endpoint=%s counters should be sorted path=[%s]", endpoint.Query, shortPath(path)) t.Errorf("use this instead\n") @@ -377,7 +360,7 @@ func TestMetricsAreSortedAndNoDuplicates(t *testing.T) { }, allTemplatesButEms...) 
} -func checkForDuplicateMetrics(t *testing.T, model TemplateModel, path string) { +func checkForDuplicateMetrics(t *testing.T, model Model, path string) { dupSet := make(map[string]bool) for _, m := range model.metrics { p := m.pathString() @@ -391,7 +374,7 @@ func checkForDuplicateMetrics(t *testing.T, model TemplateModel, path string) { for _, endpoint := range model.Endpoints { // endpoints are independent metrics dupSet = make(map[string]bool) - for _, m := range endpoint.metrics { + for _, m := range endpoint.Metrics { p := m.pathString() _, ok := dupSet[p] if ok { @@ -402,14 +385,14 @@ func checkForDuplicateMetrics(t *testing.T, model TemplateModel, path string) { } } -func checkSortedCounters(counters []metric) sorted { +func checkSortedCounters(counters []Metric) sorted { got := countersStr(counters) sortZapiCounters(counters) want := countersStr(counters) return sorted{got: got, want: want} } -func countersStr(counters []metric) string { +func countersStr(counters []Metric) string { builder := strings.Builder{} parentSeen := make(map[string]bool) for _, counter := range counters { @@ -437,7 +420,7 @@ func countersStr(counters []metric) string { return builder.String() } -func sortZapiCounters(counters []metric) { +func sortZapiCounters(counters []Metric) { sort.SliceStable(counters, func(i, j int) bool { a := counters[i] b := counters[j] @@ -487,9 +470,7 @@ func labelsToString(labels []string) string { return b.String() } -var sigilReplacer = strings.NewReplacer("^", "", "- ", "") - -func visitTemplates(t *testing.T, eachTemplate func(path string, model TemplateModel), dirs ...string) { +func visitTemplates(t *testing.T, eachTemplate func(path string, model Model), dirs ...string) { if len(dirs) == 0 { t.Fatalf("must pass list of directories") } @@ -499,7 +480,6 @@ func visitTemplates(t *testing.T, eachTemplate func(path string, model TemplateM if err != nil { return err } - var model TemplateModel ext := filepath.Ext(path) if ext != ".yaml" { return nil @@ -507,19 +487,10 @@ func visitTemplates(t *testing.T, eachTemplate func(path string, model TemplateM if strings.HasSuffix(path, "custom.yaml") || strings.HasSuffix(path, "default.yaml") { return nil } - data, err := os.ReadFile(path) + model, err := ReadTemplate(path) if err != nil { return fmt.Errorf("failed to read template path=%s err=%w", shortPath(path), err) } - model, err = unmarshalModel(data) - if err != nil { - return fmt.Errorf("failed to unmarshalModel template path=%s err=%w", shortPath(path), err) - } - err = addPluginLabels(path, &model) - if err != nil { - // t.Errorf("failed to addPluginLabels template path=%s err=%v", shortPath(path), err) - return err - } eachTemplate(path, model) return nil }) @@ -529,277 +500,6 @@ func visitTemplates(t *testing.T, eachTemplate func(path string, model TemplateM } } -func addPluginLabels(path string, model *TemplateModel) error { - template, err := tree.ImportYaml(path) - if err != nil { - return fmt.Errorf("failed to ImportYaml err: %w", err) - } - err = findBuiltInPlugins(template, model) - if err != nil { - return fmt.Errorf("failed to find findBuiltInPlugins err: %w", err) - } - err = findCustomPlugins(path, template, model) - if err != nil { - return fmt.Errorf("failed to findCustomPlugins err: %w", err) - } - return nil -} - -func flattenCounters(n *y3.Node, metrics *[]metric, parents []string) { - switch n.Tag { - case "!!map": - key := n.Content[0].Value - if key == "hidden_fields" || key == "filter" { - return - } - parents = append(parents, key) - 
flattenCounters(n.Content[1], metrics, parents) - case "!!seq": - for _, c := range n.Content { - flattenCounters(c, metrics, parents) - } - case "!!str": - *metrics = append(*metrics, newZapiMetric(n, parents)) - } -} - -func newZapiMetric(n *y3.Node, parents []string) metric { - // separate left and right and remove all sigils - text := n.Value - noSigils := sigilReplacer.Replace(text) - before, after, found := strings.Cut(noSigils, "=>") - m := metric{ - line: text, - left: strings.TrimSpace(noSigils), - hasSigil: strings.Contains(text, "^"), - column: n.Column, - parents: parents, - } - if found { - m.left = strings.TrimSpace(before) - m.right = trimComment(after) - m.renameColumn = strings.Index(text, "=>") + n.Column - } - return m -} - -var setRe = regexp.MustCompile(`[sS]etLabel\("?(\w+)"?,`) - -func findCustomPlugins(path string, template *node.Node, model *TemplateModel) error { - plug := template.SearchChildren([]string{"plugins"}) - if len(plug) == 0 { - return nil - } - builtIn := map[string]bool{ - "LabelAgent": true, - "MetricAgent": true, - "Aggregator": true, - "Max": true, - "Tenant": true, - } - for _, child := range plug[0].Children { - name := child.GetNameS() - if name == "" { - name = child.GetContentS() - } - if builtIn[name] { - continue - } - - goPluginName := strings.ToLower(name) - splits := strings.Split(path, "/") - pluginGo := fmt.Sprintf("../../../cmd/collectors/%s/plugins/%s/%s.go", splits[4], goPluginName, goPluginName) - - // Both Zapi and REST sensor.yaml templates uses a single plugin defined in power.go - if strings.Contains(path, "sensor.yaml") { - pluginGo = "../../../cmd/collectors/power.go" - } - - err2 := readPlugin(pluginGo, model) - if err2 != nil { - return err2 - } - // special case for labels added outside normal per-object plugin - if strings.Contains(path, "snapmirror.yaml") || strings.Contains(path, "svm.yaml") { - err2 = readPlugin("../../../cmd/collectors/commonutils.go", model) - if err2 != nil { - return err2 - } - } - } - return nil -} - -func readPlugin(fileName string, model *TemplateModel) error { - file, err := os.Open(fileName) - if err != nil { - return err - } - scanner := bufio.NewScanner(file) - scanner.Split(bufio.ScanLines) - for scanner.Scan() { - text := scanner.Text() - trimmed := strings.TrimSpace(text) - matches := setRe.FindStringSubmatch(trimmed) - if len(matches) == 2 { - model.pluginLabels = append(model.pluginLabels, matches[1]) - } - } - _ = file.Close() - return nil -} - -func findBuiltInPlugins(template *node.Node, model *TemplateModel) error { - template.PreprocessTemplate() - children := template.SearchChildren([]string{"plugins", "LabelAgent"}) - if len(children) == 0 { - return nil - } - abc := plugin.AbstractPlugin{Params: children[0]} - la := labelagent.New(&abc) - err := la.Init() - if err != nil { - return err - } - model.pluginLabels = la.NewLabels() - - children = template.SearchChildren([]string{"plugins", "Aggregator"}) - if len(children) == 0 { - return nil - } - abc = plugin.AbstractPlugin{Params: children[0]} - agg := aggregator.New(&abc) - err = agg.Init() - if err != nil { - return err - } - model.pluginLabels = append(model.pluginLabels, agg.NewLabels()...) 
- - return nil -} - -func unmarshalModel(data []byte) (TemplateModel, error) { - tm := TemplateModel{} - root := &y3.Node{} - err := y3.Unmarshal(data, root) - if err != nil { - return tm, fmt.Errorf("failed to unmarshal err: %w", err) - } - if len(root.Content) == 0 { - return tm, errs.New(errs.ErrConfig, "template file is empty or does not exist") - } - contentNode := root.Content[0] - err = readNameQueryObject(&tm, contentNode) - if err != nil { - return tm, err - } - countersNode := searchNode(contentNode, "counters") - if countersNode == nil { - return tm, fmt.Errorf("template has no counters") - } - metrics := make([]metric, 0) - flattenCounters(countersNode, &metrics, make([]string, 0)) - addEndpoints(&tm, searchNode(contentNode, "endpoints"), make([]string, 0)) - addExportOptions(&tm, searchNode(contentNode, "export_options")) - - tm.metrics = metrics - return tm, nil -} - -func addExportOptions(tm *TemplateModel, n *y3.Node) { - if n == nil { - return - } - instanceKeys := searchNode(n, "instance_keys") - if instanceKeys != nil { - for _, ikn := range instanceKeys.Content { - tm.ExportOptions.InstanceKeys = append(tm.ExportOptions.InstanceKeys, ikn.Value) - } - } - instanceLabels := searchNode(n, "instance_labels") - if instanceLabels != nil { - for _, il := range instanceLabels.Content { - tm.ExportOptions.InstanceLabels = append(tm.ExportOptions.InstanceLabels, il.Value) - } - } -} - -func readNameQueryObject(tm *TemplateModel, root *y3.Node) error { - nameNode := searchNode(root, "name") - if nameNode != nil { - tm.Name = nameNode.Value - } - queryNode := searchNode(root, "query") - if queryNode != nil { - tm.Query = queryNode.Value - } - objectNode := searchNode(root, "object") - if objectNode != nil { - tm.Object = objectNode.Value - } - if tm.Name == "" { - return fmt.Errorf("template has no name") - } - if tm.Query == "" { - return fmt.Errorf("template has no query") - } - if tm.Object == "" { - return fmt.Errorf("template has no object") - } - return nil -} - -func addEndpoints(tm *TemplateModel, n *y3.Node, parents []string) { - if n == nil { - return - } - for _, m := range n.Content { - query := m.Content[1].Value - metrics := make([]metric, 0) - countersNode := m.Content[3] - flattenCounters(countersNode, &metrics, parents) - ep := &Endpoint{Query: query, metrics: metrics} - tm.Endpoints = append(tm.Endpoints, ep) - } -} - -func searchNode(r *y3.Node, key string) *y3.Node { - for i, n := range r.Content { - if n.Tag == "!!str" && n.Value == key { - return r.Content[i+1] - } - } - return nil -} - -func trimComment(text string) string { - lastSink := strings.Index(text, "#") - if lastSink > -1 { - return strings.TrimSpace(text[:lastSink]) - } - return strings.TrimSpace(text) -} - -type Endpoint struct { - Query string `yaml:"query"` - Counters []string `yaml:"counters"` - metrics []metric -} - -type TemplateModel struct { - Name string `yaml:"name"` - Query string `yaml:"query"` - Object string `yaml:"object"` - Endpoints []*Endpoint `yaml:"endpoints"` - ExportOptions struct { - InstanceKeys []string `yaml:"instance_keys"` - InstanceLabels []string `yaml:"instance_labels"` - IncludeAllLabels bool `yaml:"include_all_labels"` - } `yaml:"export_options"` - metrics []metric - pluginLabels []string -} - func collectorPath(path string) string { const conf string = "conf/" index := strings.Index(path, conf) diff --git a/docs/ontap-metrics.md b/docs/ontap-metrics.md index d5401ec73..904650fb3 100644 --- a/docs/ontap-metrics.md +++ b/docs/ontap-metrics.md @@ -7,7 +7,7 @@ These 
can be generated on demand by running `bin/harvest grafana metrics`. See - More information about ONTAP REST performance counters can be found [here](https://docs.netapp.com/us-en/ontap-pcmap-9121/index.html). ``` -Creation Date : 2023-Aug-14 +Creation Date : 2023-Oct-05 ONTAP Version: 9.13.1 ``` ## Understanding the structure @@ -37,6 +37,362 @@ Performance related metrics also include: ## Metrics +### aggr_disk_busy + +The utilization percent of the disk. aggr_disk_busy is [disk_busy](#disk_busy) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `disk_busy_percent`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_busy`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_capacity + +Disk capacity in MB. aggr_disk_capacity is [disk_capacity](#disk_capacity) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `capacity`
Unit: mb
Type: raw
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_capacity`
Unit: mb
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_cp_read_chain + +Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_cp_read_chain is [disk_cp_read_chain](#disk_cp_read_chain) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_cp_read_latency + +Average latency per block in microseconds for consistency point read operations. aggr_disk_cp_read_latency is [disk_cp_read_latency](#disk_cp_read_latency) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_cp_reads + +Number of disk read operations initiated each second for consistency point processing. aggr_disk_cp_reads is [disk_cp_reads](#disk_cp_reads) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_io_pending + +Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_io_pending is [disk_io_pending](#disk_io_pending) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_io_queued + +Number of I/Os queued to the disk but not yet issued. aggr_disk_io_queued is [disk_io_queued](#disk_io_queued) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_busy + +The utilization percent of the disk. aggr_disk_max_busy is the maximum of [disk_busy](#disk_busy) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `disk_busy_percent`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | + + +### aggr_disk_max_capacity + +Disk capacity in MB. aggr_disk_max_capacity is the maximum of [disk_capacity](#disk_capacity) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `capacity`
Unit: mb
Type: raw
Base: | conf/restperf/9.12.0/disk.yaml | + + +### aggr_disk_max_cp_read_chain + +Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_max_cp_read_chain is the maximum of [disk_cp_read_chain](#disk_cp_read_chain) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_cp_read_latency + +Average latency per block in microseconds for consistency point read operations. aggr_disk_max_cp_read_latency is the maximum of [disk_cp_read_latency](#disk_cp_read_latency) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_cp_reads + +Number of disk read operations initiated each second for consistency point processing. aggr_disk_max_cp_reads is the maximum of [disk_cp_reads](#disk_cp_reads) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_disk_busy + +The utilization percent of the disk. aggr_disk_max_disk_busy is the maximum of [disk_busy](#disk_busy) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_busy`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_disk_capacity + +Disk capacity in MB. aggr_disk_max_disk_capacity is the maximum of [disk_capacity](#disk_capacity) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_capacity`
Unit: mb
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_io_pending + +Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_max_io_pending is the maximum of [disk_io_pending](#disk_io_pending) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_io_queued + +Number of I/Os queued to the disk but not yet issued. aggr_disk_max_io_queued is the maximum of [disk_io_queued](#disk_io_queued) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_total_data + +Total throughput for user operations per second. aggr_disk_max_total_data is the maximum of [disk_total_data](#disk_total_data) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_total_transfers + +Total number of disk operations involving data transfer initiated per second. aggr_disk_max_total_transfers is the maximum of [disk_total_transfers](#disk_total_transfers) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_transfer_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_transfers`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_user_read_blocks + +Number of blocks transferred for user read operations per second. aggr_disk_max_user_read_blocks is the maximum of [disk_user_read_blocks](#disk_user_read_blocks) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_user_read_chain + +Average number of blocks transferred in each user read operation. aggr_disk_max_user_read_chain is the maximum of [disk_user_read_chain](#disk_user_read_chain) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_user_read_latency + +Average latency per block in microseconds for user read operations. aggr_disk_max_user_read_latency is the maximum of [disk_user_read_latency](#disk_user_read_latency) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_user_reads + +Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_max_user_reads is the maximum of [disk_user_reads](#disk_user_reads) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_user_write_blocks + +Number of blocks transferred for user write operations per second. aggr_disk_max_user_write_blocks is the maximum of [disk_user_write_blocks](#disk_user_write_blocks) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_user_write_chain + +Average number of blocks transferred in each user write operation. aggr_disk_max_user_write_chain is the maximum of [disk_user_write_chain](#disk_user_write_chain) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_write_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_user_write_latency + +Average latency per block in microseconds for user write operations. aggr_disk_max_user_write_latency is the maximum of [disk_user_write_latency](#disk_user_write_latency) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_max_user_writes + +Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_max_user_writes is the maximum of [disk_user_writes](#disk_user_writes) for label `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_writes`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_total_data + +Total throughput for user operations per second. aggr_disk_total_data is [disk_total_data](#disk_total_data) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_total_transfers + +Total number of disk operations involving data transfer initiated per second. aggr_disk_total_transfers is [disk_total_transfers](#disk_total_transfers) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_transfer_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_transfers`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_user_read_blocks + +Number of blocks transferred for user read operations per second. aggr_disk_user_read_blocks is [disk_user_read_blocks](#disk_user_read_blocks) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_user_read_chain + +Average number of blocks transferred in each user read operation. aggr_disk_user_read_chain is [disk_user_read_chain](#disk_user_read_chain) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_user_read_latency + +Average latency per block in microseconds for user read operations. aggr_disk_user_read_latency is [disk_user_read_latency](#disk_user_read_latency) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_user_reads + +Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_user_reads is [disk_user_reads](#disk_user_reads) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_user_write_blocks + +Number of blocks transferred for user write operations per second. aggr_disk_user_write_blocks is [disk_user_write_blocks](#disk_user_write_blocks) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_user_write_chain + +Average number of blocks transferred in each user write operation. aggr_disk_user_write_chain is [disk_user_write_chain](#disk_user_write_chain) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_write_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_user_write_latency + +Average latency per block in microseconds for user write operations. aggr_disk_user_write_latency is [disk_user_write_latency](#disk_user_write_latency) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### aggr_disk_user_writes + +Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_user_writes is [disk_user_writes](#disk_user_writes) aggregated by `aggr`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_writes`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + ### aggr_efficiency_savings Space saved by storage efficiencies (logical_used - used) @@ -199,8 +555,8 @@ Total Data Reduction Physical Used Without Snapshots | API | Endpoint | Metric | Template | |--------|----------|--------|---------| +| REST | `api/storage/aggregates` | `space.efficiency_without_snapshots.logical_used, space.efficiency_without_snapshots.savings` | conf/rest/9.12.0/aggr.yaml | | ZAPI | `aggr-efficiency-get-iter` | `aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots` | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml | -| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/aggr.yaml | ### aggr_physical_used_wo_snapshots_flexclones Total Data Reduction Physical Used without snapshots and flexclones | API | Endpoint | Metric | Template | |--------|----------|--------|---------| +| REST | `api/storage/aggregates` | `space.efficiency_without_snapshots_flexclones.logical_used, space.efficiency_without_snapshots_flexclones.savings` | conf/rest/9.12.0/aggr.yaml | | ZAPI | `aggr-efficiency-get-iter` | `aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots-flexclones` | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml | -| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/aggr.yaml | ### aggr_power @@ -238,8 +594,8 @@ Number of disks in the aggregate. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| +| REST | `api/storage/aggregates` | `block_storage.primary.disk_count, block_storage.hybrid_cache.disk_count` | conf/rest/9.12.0/aggr.yaml | | ZAPI | `aggr-get-iter` | `aggr-attributes.aggr-raid-attributes.disk-count` | conf/zapi/cdot/9.8.0/aggr.yaml | -| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/aggr.yaml | ### aggr_raid_plex_count @@ -307,8 +663,8 @@ The largest value to which the maxfiles-available parameter can be increased by | API | Endpoint | Metric | Template | |--------|----------|--------|---------| +| REST | `api/storage/aggregates` | `snapshot.max_files_available, snapshot.max_files_used` | conf/rest/9.12.0/aggr.yaml | | ZAPI | `aggr-get-iter` | `aggr-attributes.aggr-snapshot-attributes.maxfiles-possible` | conf/zapi/cdot/9.8.0/aggr.yaml | -| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/aggr.yaml | ### aggr_snapshot_maxfiles_used @@ -441,6 +797,24 @@ The percentage of inactive user data in the block storage. This property is only | ZAPI | `aggr-get-iter` | `aggr-attributes.aggr-space-attributes.performance-tier-inactive-user-data-percent` | conf/zapi/cdot/9.8.0/aggr.yaml | +### aggr_space_performance_tier_used + +A summation of volume footprints (including volume guarantees), in bytes. This includes all of the volume footprints in the block_storage tier and the cloud_storage tier. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **. 
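+
+Because this is an advanced field, ONTAP leaves it unpopulated unless it is named explicitly in `fields`. A minimal sketch of requesting the field directly (this is not how Harvest collects it; `CLUSTER`, `USER`, and `PASSWORD` are assumed environment variables pointing at a reachable cluster and read-only credentials):
+
+```go
+package main
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// Ask for space.footprint explicitly; a plain GET on api/storage/aggregates
+	// would leave the field unpopulated.
+	url := "https://" + os.Getenv("CLUSTER") + "/api/storage/aggregates?fields=space.footprint"
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		panic(err)
+	}
+	req.SetBasicAuth(os.Getenv("USER"), os.Getenv("PASSWORD"))
+	client := &http.Client{Transport: &http.Transport{
+		// Lab-only shortcut for clusters with self-signed certificates.
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+	}}
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Println(string(body)) // JSON records, each carrying space.footprint in bytes
+}
+```
+
+The companion `space.footprint_percent` field (next section) can be requested the same way.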
+ +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/storage/aggregates` | `space.footprint` | conf/rest/9.12.0/aggr.yaml | + + +### aggr_space_performance_tier_used_percent + +The aggregate's volume footprint total ([aggr_space_performance_tier_used](#aggr_space_performance_tier_used)) expressed as a percentage. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/storage/aggregates` | `space.footprint_percent` | conf/rest/9.12.0/aggr.yaml | + + ### aggr_space_physical_used Total physical used size of an aggregate in bytes. @@ -526,8 +900,8 @@ The percentage of disk space currently in use on the referenced file system | API | Endpoint | Metric | Template | |--------|----------|--------|---------| +| REST | `api/storage/aggregates` | `space.block_storage.used, space.block_storage.size` | conf/rest/9.12.0/aggr.yaml | | ZAPI | `aggr-get-iter` | `aggr-attributes.aggr-space-attributes.percent-used-capacity` | conf/zapi/cdot/9.8.0/aggr.yaml | -| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/aggr.yaml | ### aggr_total_logical_used @@ -546,8 +920,8 @@ Total Physical Used | API | Endpoint | Metric | Template | |--------|----------|--------|---------| +| REST | `api/storage/aggregates` | `space.efficiency.logical_used, space.efficiency.savings` | conf/rest/9.12.0/aggr.yaml | | ZAPI | `aggr-efficiency-get-iter` | `aggr-efficiency-info.aggr-efficiency-cumulative-info.total-physical-used` | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml | -| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/aggr.yaml | ### aggr_volume_count_flexvol @@ -577,6 +951,7 @@ The amount of cloud space used by all the aggregates attached to the target, in | API | Endpoint | Metric | Template | |--------|----------|--------|---------| | REST | `api/cloud/targets` | `used` | conf/rest/9.12.0/cloud_target.yaml | +| ZAPI | `aggr-object-store-config-get-iter` | `aggr-object-store-config-info.used-space` | conf/zapi/cdot/9.10.0/aggr_object_store_config.yaml | ### cluster_new_status @@ -659,6 +1034,16 @@ Current number of copy requests being processed by the SpinCE. | ZAPI | `perf-object-get-instances copy_manager` | `spince_copy_count_curr`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/copy_manager.yaml | +### disk_busy + +The utilization percent of the disk + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `disk_busy_percent`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_busy`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + ### disk_bytes_per_sector Bytes per sector. @@ -669,6 +1054,66 @@ Bytes per sector. | ZAPI | `storage-disk-get-iter` | `storage-disk-info.disk-inventory-info.bytes-per-sector` | conf/zapi/cdot/9.8.0/disk.yaml | +### disk_capacity + +Disk capacity in MB + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `capacity`
Unit: mb
Type: raw
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_capacity`
Unit: mb
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_cp_read_chain + +Average number of blocks transferred in each consistency point read operation during a CP + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_cp_read_latency + +Average latency per block in microseconds for consistency point read operations + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_cp_reads + +Number of disk read operations initiated each second for consistency point processing + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_io_pending + +Average number of I/Os issued to the disk for which we have not yet received the response + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_io_queued + +Number of I/Os queued to the disk but not yet issued + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + ### disk_power_on_hours Hours powered on. @@ -728,14 +1173,34 @@ Number of Sectors Written | REST | `api/private/cli/disk` | `sectors_written` | conf/rest/9.12.0/disk.yaml | +### disk_total_data + +Total throughput for user operations per second + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_total_transfers + +Total number of disk operations involving data transfer initiated per second + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_transfer_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_transfers`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + ### disk_uptime Number of seconds the drive has been powered on | API | Endpoint | Metric | Template | |--------|----------|--------|---------| +| REST | `api/storage/disks` | `stats.power_on_hours, 60, 60` | conf/rest/9.12.0/disk.yaml | | ZAPI | `storage-disk-get-iter` | `storage-disk-info.disk-stats-info.power-on-time-interval` | conf/zapi/cdot/9.8.0/disk.yaml | -| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/disk.yaml | ### disk_usable_size @@ -747,6 +1212,86 @@ Usable size of each disk, in bytes. | REST | `api/storage/disks` | `usable_size` | conf/rest/9.12.0/disk.yaml | +### disk_user_read_blocks + +Number of blocks transferred for user read operations per second + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_user_read_chain + +Average number of blocks transferred in each user read operation + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_user_read_latency + +Average latency per block in microseconds for user read operations + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_user_reads + +Number of disk read operations initiated each second for retrieving data or metadata associated with user requests + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_user_write_blocks + +Number of blocks transferred for user write operations per second + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_user_write_chain + +Average number of blocks transferred in each user write operation + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_write_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_user_write_latency + +Average latency per block in microseconds for user write operations + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### disk_user_writes + +Number of disk write operations initiated each second for storing data or metadata associated with user requests + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_writes`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + ### environment_sensor_average_ambient_temperature Average temperature of all ambient sensors for node in Celsius. @@ -2638,6 +3183,16 @@ The amount of space consumed by the main data stream of the LUN.
This value | ZAPI | `lun-get-iter` | `lun-info.size-used` | conf/zapi/cdot/9.8.0/lun.yaml | + +### lun_size_used_percent + +Percentage of the LUN's size that is consumed, derived from `size_used` and `size`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/storage/luns` | `size_used, size` | conf/rest/9.12.0/lun.yaml | +| ZAPI | `lun-get-iter` | `size_used, size` | conf/zapi/cdot/9.8.0/lun.yaml | + + ### lun_unmap_reqs Number of unmap command requests @@ -2942,30 +3497,50 @@ Number of read operations Remote read bytes -| API | Endpoint | Metric | Template | -|--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/namespace` | `remote.read_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml | -| ZAPI | `perf-object-get-instances namespace` | `remote_bytes`
Unit:
Type:
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml | +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/namespace` | `remote.read_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml | +| ZAPI | `perf-object-get-instances namespace` | `remote_bytes`
Unit:
Type:
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml | + + +### namespace_remote_ops + +Number of remote read operations + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/namespace` | `remote.read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml | +| ZAPI | `perf-object-get-instances namespace` | `remote_ops`
Unit:
Type:
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml | + + +### namespace_size + +The total provisioned size of the NVMe namespace. Valid in POST and PATCH. The NVMe namespace size can be increased but not made smaller using the REST interface.
The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The maximum size is variable with respect to large NVMe namespace support in ONTAP. If large namespaces are supported, the maximum size is 128 TB (140737488355328 bytes) and if not supported, the maximum size is just under 16 TB (17557557870592 bytes). The minimum size supported is always 4096 bytes.
For more information, see _Size properties_ in the _docs_ section of the ONTAP REST API documentation. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/storage/namespaces` | `space.size` | conf/rest/9.12.0/namespace.yaml | +| ZAPI | `nvme-namespace-get-iter` | `nvme-namespace-info.size` | conf/zapi/cdot/9.8.0/namespace.yaml | + +### namespace_size_available -### namespace_remote_ops -Number of remote read operations | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/namespace` | `remote.read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml | -| ZAPI | `perf-object-get-instances namespace` | `remote_ops`
Unit:
Type:
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml | +| REST | `api/storage/namespaces` | `size, size_used` | conf/rest/9.12.0/namespace.yaml | +| ZAPI | `nvme-namespace-get-iter` | `size, size_used` | conf/zapi/cdot/9.8.0/namespace.yaml | -### namespace_size +### namespace_size_available_percent + -The total provisioned size of the NVMe namespace. Valid in POST and PATCH. The NVMe namespace size can be increased but not be made smaller using the REST interface.
The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The maximum size is variable with respect to large NVMe namespace support in ONTAP. If large namespaces are supported, the maximum size is 128 TB (140737488355328 bytes) and if not supported, the maximum size is just under 16 TB (17557557870592 bytes). The minimum size supported is always 4096 bytes.
For more information, see _Size properties_ in the _docs_ section of the ONTAP REST API documentation. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/storage/namespaces` | `space.size` | conf/rest/9.12.0/namespace.yaml | -| ZAPI | `nvme-namespace-get-iter` | `nvme-namespace-info.size` | conf/zapi/cdot/9.8.0/namespace.yaml | +| REST | `api/storage/namespaces` | `size_available, size` | conf/rest/9.12.0/namespace.yaml | +| ZAPI | `nvme-namespace-get-iter` | `size_available, size` | conf/zapi/cdot/9.8.0/namespace.yaml | ### namespace_size_used @@ -3649,74 +4224,430 @@ Average latency for CIFS write operations | ZAPI | `perf-object-get-instances cifs:node` | `cifs_write_latency`
Unit: microsec
Type: average
Base: cifs_write_ops | conf/zapiperf/cdot/9.8.0/cifs_node.yaml | -### node_cifs_write_ops +### node_cifs_write_ops + +Total number of CIFS write operations + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/svm_cifs:node` | `total_write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/cifs_node.yaml | +| ZAPI | `perf-object-get-instances cifs:node` | `cifs_write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/cifs_node.yaml | + + +### node_cpu_busy + +System CPU resource utilization. Returns a computed percentage for the default CPU field. It computes a 'CPU usage summary' value that indicates how busy the system is, based upon the most heavily utilized domain. The idea is to determine the amount of available CPU until either a domain maxes out or all available idle CPU cycles are exhausted, whichever occurs first. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/system:node` | `cpu_busy`
Unit: percent
Type: percent
Base: cpu_elapsed_time | conf/restperf/9.12.0/system_node.yaml | +| ZAPI | `perf-object-get-instances system:node` | `cpu_busy`
Unit: percent
Type: percent
Base: cpu_elapsed_time | conf/zapiperf/cdot/9.8.0/system_node.yaml | + + +### node_cpu_busytime + +The time (in hundredths of a second) that the CPU has been doing useful work since the last boot + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| ZAPI | `system-node-get-iter` | `node-details-info.cpu-busytime` | conf/zapi/cdot/9.8.0/node.yaml | +| REST | `api/private/cli/node` | `cpu_busy_time` | conf/rest/9.12.0/node.yaml | + + +### node_cpu_domain_busy + +Array of processor time in percentage spent in various domains + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/system:node` | `domain_busy`
Unit: percent
Type: percent
Base: cpu_elapsed_time | conf/restperf/9.12.0/system_node.yaml | +| ZAPI | `perf-object-get-instances system:node` | `domain_busy`
Unit: percent
Type: percent
Base: cpu_elapsed_time | conf/zapiperf/cdot/9.8.0/system_node.yaml | + + +### node_cpu_elapsed_time + +Elapsed time since boot + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/system:node` | `cpu_elapsed_time`
Unit: microsec
Type: delta
Base: | conf/restperf/9.12.0/system_node.yaml | +| ZAPI | `perf-object-get-instances system:node` | `cpu_elapsed_time`
Unit: none
Type: delta,no-display
Base: | conf/zapiperf/cdot/9.8.0/system_node.yaml | + + +### node_disk_busy + +The utilization percent of the disk. node_disk_busy is [disk_busy](#disk_busy) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `disk_busy_percent`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_busy`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_capacity + +Disk capacity in MB. node_disk_capacity is [disk_capacity](#disk_capacity) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `capacity`
Unit: mb
Type: raw
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_capacity`
Unit: mb
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_cp_read_chain + +Average number of blocks transferred in each consistency point read operation during a CP. node_disk_cp_read_chain is [disk_cp_read_chain](#disk_cp_read_chain) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_cp_read_latency + +Average latency per block in microseconds for consistency point read operations. node_disk_cp_read_latency is [disk_cp_read_latency](#disk_cp_read_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_cp_reads + +Number of disk read operations initiated each second for consistency point processing. node_disk_cp_reads is [disk_cp_reads](#disk_cp_reads) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_data_read + +Number of disk kilobytes (KB) read per second + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/system:node` | `disk_data_read`
Unit: kb_per_sec
Type: rate
Base: | conf/restperf/9.12.0/system_node.yaml | +| ZAPI | `perf-object-get-instances system:node` | `disk_data_read`
Unit: kb_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/system_node.yaml | + + +### node_disk_data_written + +Number of disk kilobytes (KB) written per second + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/system:node` | `disk_data_written`
Unit: kb_per_sec
Type: rate
Base: | conf/restperf/9.12.0/system_node.yaml | +| ZAPI | `perf-object-get-instances system:node` | `disk_data_written`
Unit: kb_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/system_node.yaml | + + +### node_disk_io_pending + +Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_io_pending is [disk_io_pending](#disk_io_pending) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_io_queued + +Number of I/Os queued to the disk but not yet issued. node_disk_io_queued is [disk_io_queued](#disk_io_queued) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_busy + +The utilization percent of the disk. node_disk_max_busy is the maximum of [disk_busy](#disk_busy) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `disk_busy_percent`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | + + +### node_disk_max_capacity + +Disk capacity in MB. node_disk_max_capacity is the maximum of [disk_capacity](#disk_capacity) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `capacity`
Unit: mb
Type: raw
Base: | conf/restperf/9.12.0/disk.yaml | + + +### node_disk_max_cp_read_chain + +Average number of blocks transferred in each consistency point read operation during a CP. node_disk_max_cp_read_chain is the maximum of [disk_cp_read_chain](#disk_cp_read_chain) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_cp_read_latency + +Average latency per block in microseconds for consistency point read operations. node_disk_max_cp_read_latency is the maximum of [disk_cp_read_latency](#disk_cp_read_latency) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_cp_reads + +Number of disk read operations initiated each second for consistency point processing. node_disk_max_cp_reads is the maximum of [disk_cp_reads](#disk_cp_reads) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_disk_busy + +The utilization percent of the disk. node_disk_max_disk_busy is the maximum of [disk_busy](#disk_busy) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_busy`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_disk_capacity + +Disk capacity in MB. node_disk_max_disk_capacity is the maximum of [disk_capacity](#disk_capacity) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_capacity`
Unit: mb
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_io_pending + +Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_max_io_pending is the maximum of [disk_io_pending](#disk_io_pending) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_io_queued + +Number of I/Os queued to the disk but not yet issued. node_disk_max_io_queued is the maximum of [disk_io_queued](#disk_io_queued) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_total_data + +Total throughput for user operations per second. node_disk_max_total_data is the maximum of [disk_total_data](#disk_total_data) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_total_transfers + +Total number of disk operations involving data transfer initiated per second. node_disk_max_total_transfers is the maximum of [disk_total_transfers](#disk_total_transfers) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_transfer_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_transfers`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_user_read_blocks + +Number of blocks transferred for user read operations per second. node_disk_max_user_read_blocks is the maximum of [disk_user_read_blocks](#disk_user_read_blocks) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_user_read_chain + +Average number of blocks transferred in each user read operation. node_disk_max_user_read_chain is the maximum of [disk_user_read_chain](#disk_user_read_chain) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_user_read_latency + +Average latency per block in microseconds for user read operations. node_disk_max_user_read_latency is the maximum of [disk_user_read_latency](#disk_user_read_latency) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_user_reads + +Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_max_user_reads is the maximum of [disk_user_reads](#disk_user_reads) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_user_write_blocks + +Number of blocks transferred for user write operations per second. node_disk_max_user_write_blocks is the maximum of [disk_user_write_blocks](#disk_user_write_blocks) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_user_write_chain + +Average number of blocks transferred in each user write operation. node_disk_max_user_write_chain is the maximum of [disk_user_write_chain](#disk_user_write_chain) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_write_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_user_write_latency + +Average latency per block in microseconds for user write operations. node_disk_max_user_write_latency is the maximum of [disk_user_write_latency](#disk_user_write_latency) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_max_user_writes + +Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_max_user_writes is the maximum of [disk_user_writes](#disk_user_writes) for label `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_writes`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_total_data + +Total throughput for user operations per second. node_disk_total_data is [disk_total_data](#disk_total_data) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_total_transfers + +Total number of disk operations involving data transfer initiated per second. node_disk_total_transfers is [disk_total_transfers](#disk_total_transfers) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_transfer_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_transfers`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_user_read_blocks + +Number of blocks transferred for user read operations per second. node_disk_user_read_blocks is [disk_user_read_blocks](#disk_user_read_blocks) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### node_disk_user_read_chain -Total number of CIFS write operations +Average number of blocks transferred in each user read operation. node_disk_user_read_chain is [disk_user_read_chain](#disk_user_read_chain) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs:node` | `total_write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/cifs_node.yaml | -| ZAPI | `perf-object-get-instances cifs:node` | `cifs_write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/cifs_node.yaml | +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | -### node_cpu_busy +### node_disk_user_read_latency -System CPU resource utilization. Returns a computed percentage for the default CPU field. Basically computes a 'cpu usage summary' value which indicates how 'busy' the system is based upon the most heavily utilized domain. The idea is to determine the amount of available CPU until we're limited by either a domain maxing out OR we exhaust all available idle CPU cycles, whichever occurs first. +Average latency per block in microseconds for user read operations. node_disk_user_read_latency is [disk_user_read_latency](#disk_user_read_latency) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/system:node` | `cpu_busy`
Unit: percent
Type: percent
Base: cpu_elapsed_time | conf/restperf/9.12.0/system_node.yaml | -| ZAPI | `perf-object-get-instances system:node` | `cpu_busy`
Unit: percent
Type: percent
Base: cpu_elapsed_time | conf/zapiperf/cdot/9.8.0/system_node.yaml | +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | -### node_cpu_busytime +### node_disk_user_reads -The time (in hundredths of a second) that the CPU has been doing useful work since the last boot +Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_user_reads is [disk_user_reads](#disk_user_reads) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| ZAPI | `system-node-get-iter` | `node-details-info.cpu-busytime` | conf/zapi/cdot/9.8.0/node.yaml | -| REST | `api/private/cli/node` | `cpu_busy_time` | conf/rest/9.12.0/node.yaml | +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | -### node_cpu_domain_busy +### node_disk_user_write_blocks -Array of processor time in percentage spent in various domains +Number of blocks transferred for user write operations per second. node_disk_user_write_blocks is [disk_user_write_blocks](#disk_user_write_blocks) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/system:node` | `domain_busy`
Unit: percent
Type: percent
Base: cpu_elapsed_time | conf/restperf/9.12.0/system_node.yaml | -| ZAPI | `perf-object-get-instances system:node` | `domain_busy`
Unit: percent
Type: percent
Base: cpu_elapsed_time | conf/zapiperf/cdot/9.8.0/system_node.yaml | +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | -### node_cpu_elapsed_time +### node_disk_user_write_chain -Elapsed time since boot +Average number of blocks transferred in each user write operation. node_disk_user_write_chain is [disk_user_write_chain](#disk_user_write_chain) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/system:node` | `cpu_elapsed_time`
Unit: microsec
Type: delta
Base: | conf/restperf/9.12.0/system_node.yaml | -| ZAPI | `perf-object-get-instances system:node` | `cpu_elapsed_time`
Unit: none
Type: delta,no-display
Base: | conf/zapiperf/cdot/9.8.0/system_node.yaml | +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_write_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml | -### node_disk_data_read +### node_disk_user_write_latency -Number of disk kilobytes (KB) read per second +Average latency per block in microseconds for user write operations. node_disk_user_write_latency is [disk_user_write_latency](#disk_user_write_latency) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/system:node` | `disk_data_read`
Unit: kb_per_sec
Type: rate
Base: | conf/restperf/9.12.0/system_node.yaml | -| ZAPI | `perf-object-get-instances system:node` | `disk_data_read`
Unit: kb_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/system_node.yaml | +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | -### node_disk_data_written +### node_disk_user_writes -Number of disk kilobytes (KB) written per second +Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_user_writes is [disk_user_writes](#disk_user_writes) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/system:node` | `disk_data_written`
Unit: kb_per_sec
Type: rate
Base: | conf/restperf/9.12.0/system_node.yaml | -| ZAPI | `perf-object-get-instances system:node` | `disk_data_written`
Unit: kb_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/system_node.yaml | +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_writes`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | ### node_failed_fan @@ -5589,74 +6520,344 @@ Bytes read per second via NFS | ZAPI | `perf-object-get-instances volume:node` | `nfs_read_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml | -### node_vol_nfs_read_latency +### node_vol_nfs_read_latency + +Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume:node` | `nfs.read_latency`
Unit: microsec
Type: average
Base: nfs.read_ops | conf/restperf/9.12.0/volume_node.yaml | +| ZAPI | `perf-object-get-instances volume:node` | `nfs_read_latency`
Unit: microsec
Type: average
Base: nfs_read_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml | + + +### node_vol_nfs_read_ops + +Number of NFS read operations per second from the volume + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume:node` | `nfs.read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume_node.yaml | +| ZAPI | `perf-object-get-instances volume:node` | `nfs_read_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml | + + +### node_vol_nfs_write_data + +Bytes written per second via NFS + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume:node` | `nfs.write_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume_node.yaml | +| ZAPI | `perf-object-get-instances volume:node` | `nfs_write_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml | + + +### node_vol_nfs_write_latency + +Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume:node` | `nfs.write_latency`
Unit: microsec
Type: average
Base: nfs.write_ops | conf/restperf/9.12.0/volume_node.yaml | +| ZAPI | `perf-object-get-instances volume:node` | `nfs_write_latency`
Unit: microsec
Type: average
Base: nfs_write_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml | + + +### node_vol_nfs_write_ops + +Number of NFS write operations per second to the volume + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume:node` | `nfs.write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume_node.yaml | +| ZAPI | `perf-object-get-instances volume:node` | `nfs_write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml | + + +### node_vol_read_latency + +Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume:node` | `read_latency`
Unit: microsec
Type: average
Base: total_read_ops | conf/restperf/9.12.0/volume_node.yaml | +| ZAPI | `perf-object-get-instances volume:node` | `read_latency`
Unit: microsec
Type: average
Base: read_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml | + + +### node_vol_write_latency + +Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume:node` | `write_latency`
Unit: microsec
Type: average
Base: total_write_ops | conf/restperf/9.12.0/volume_node.yaml | +| ZAPI | `perf-object-get-instances volume:node` | `write_latency`
Unit: microsec
Type: average
Base: write_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml | + + +### node_volume_avg_latency + +Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time. node_volume_avg_latency is [volume_avg_latency](#volume_avg_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `average_latency`
Unit: microsec
Type: average
Base: total_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `avg_latency`
Unit: microsec
Type: average
Base: total_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_access_latency + +Average time for the WAFL filesystem to process NFS protocol access requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_access_latency is [volume_nfs_access_latency](#volume_nfs_access_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.access_latency`
Unit: microsec
Type: average
Base: nfs.access_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_access_latency`
Unit: microsec
Type: average
Base: nfs_access_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_access_ops + +Number of NFS accesses per second to the volume. node_volume_nfs_access_ops is [volume_nfs_access_ops](#volume_nfs_access_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.access_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_access_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_getattr_latency + +Average time for the WAFL filesystem to process NFS protocol getattr requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_getattr_latency is [volume_nfs_getattr_latency](#volume_nfs_getattr_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.getattr_latency`
Unit: microsec
Type: average
Base: nfs.getattr_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_getattr_latency`
Unit: microsec
Type: average
Base: nfs_getattr_ops | conf/zapiperf/cdot/9.8.0/volume.yaml |
+
+
+### node_volume_nfs_getattr_ops
+
+Number of NFS getattr operations per second to the volume. node_volume_nfs_getattr_ops is [volume_nfs_getattr_ops](#volume_nfs_getattr_ops) aggregated by `node`.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| REST | `api/cluster/counter/tables/volume` | `nfs.getattr_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_getattr_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_lookup_latency + +Average time for the WAFL filesystem to process NFS protocol lookup requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_lookup_latency is [volume_nfs_lookup_latency](#volume_nfs_lookup_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.lookup_latency`
Unit: microsec
Type: average
Base: nfs.lookup_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_lookup_latency`
Unit: microsec
Type: average
Base: nfs_lookup_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_lookup_ops + +Number of NFS lookups per second to the volume. node_volume_nfs_lookup_ops is [volume_nfs_lookup_ops](#volume_nfs_lookup_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.lookup_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_lookup_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_other_latency + +Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_other_latency is [volume_nfs_other_latency](#volume_nfs_other_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.other_latency`
Unit: microsec
Type: average
Base: nfs.other_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_other_latency`
Unit: microsec
Type: average
Base: nfs_other_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_other_ops + +Number of other NFS operations per second to the volume. node_volume_nfs_other_ops is [volume_nfs_other_ops](#volume_nfs_other_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.other_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_other_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_punch_hole_latency + +Average time for the WAFL filesystem to process NFS protocol hole-punch requests to the volume. node_volume_nfs_punch_hole_latency is [volume_nfs_punch_hole_latency](#volume_nfs_punch_hole_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.punch_hole_latency`
Unit: microsec
Type: average
Base: nfs.punch_hole_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_punch_hole_latency`
Unit: microsec
Type: average
Base: nfs_punch_hole_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_punch_hole_ops + +Number of NFS hole-punch requests per second to the volume. node_volume_nfs_punch_hole_ops is [volume_nfs_punch_hole_ops](#volume_nfs_punch_hole_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.punch_hole_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_punch_hole_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_read_latency + +Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_read_latency is [volume_nfs_read_latency](#volume_nfs_read_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.read_latency`
Unit: microsec
Type: average
Base: nfs.read_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_read_latency`
Unit: microsec
Type: average
Base: nfs_read_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_read_ops + +Number of NFS read operations per second from the volume. node_volume_nfs_read_ops is [volume_nfs_read_ops](#volume_nfs_read_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_read_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_setattr_latency + +Average time for the WAFL filesystem to process NFS protocol setattr requests to the volume. node_volume_nfs_setattr_latency is [volume_nfs_setattr_latency](#volume_nfs_setattr_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.setattr_latency`
Unit: microsec
Type: average
Base: nfs.setattr_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_setattr_latency`
Unit: microsec
Type: average
Base: nfs_setattr_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_setattr_ops + +Number of NFS setattr requests per second to the volume. node_volume_nfs_setattr_ops is [volume_nfs_setattr_ops](#volume_nfs_setattr_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.setattr_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_setattr_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_total_ops + +Number of total NFS operations per second to the volume. node_volume_nfs_total_ops is [volume_nfs_total_ops](#volume_nfs_total_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.total_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_total_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_write_latency + +Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency. node_volume_nfs_write_latency is [volume_nfs_write_latency](#volume_nfs_write_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.write_latency`
Unit: microsec
Type: average
Base: nfs.write_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_write_latency`
Unit: microsec
Type: average
Base: nfs_write_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_nfs_write_ops + +Number of NFS write operations per second to the volume. node_volume_nfs_write_ops is [volume_nfs_write_ops](#volume_nfs_write_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `nfs.write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `nfs_write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_other_latency + +Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time. node_volume_other_latency is [volume_other_latency](#volume_other_latency) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `other_latency`
Unit: microsec
Type: average
Base: total_other_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `other_latency`
Unit: microsec
Type: average
Base: other_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_other_ops + +Number of other operations per second to the volume. node_volume_other_ops is [volume_other_ops](#volume_other_ops) aggregated by `node`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/volume` | `total_other_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `other_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | + + +### node_volume_read_data -Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency +Bytes read per second. node_volume_read_data is [volume_read_data](#volume_read_data) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/volume:node` | `nfs.read_latency`
Unit: microsec
Type: average
Base: nfs.read_ops | conf/restperf/9.12.0/volume_node.yaml | -| ZAPI | `perf-object-get-instances volume:node` | `nfs_read_latency`
Unit: microsec
Type: average
Base: nfs_read_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml | +| REST | `api/cluster/counter/tables/volume` | `bytes_read`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `read_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml |
+
+
-### node_vol_nfs_read_ops
+### node_volume_read_latency
 
-Number of NFS read operations per second from the volume
+Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time. node_volume_read_latency is [volume_read_latency](#volume_read_latency) aggregated by `node`.
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `api/cluster/counter/tables/volume:node` | `nfs.read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume_node.yaml | -| ZAPI | `perf-object-get-instances volume:node` | `nfs_read_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml | +| REST | `api/cluster/counter/tables/volume` | `read_latency`
Unit: microsec
Type: average
Base: total_read_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `read_latency`
Unit: microsec
Type: average
Base: read_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | -### node_vol_nfs_write_data +### node_volume_read_ops -Bytes written per second via NFS +Number of read operations per second from the volume. node_volume_read_ops is [volume_read_ops](#volume_read_ops) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/volume:node` | `nfs.write_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume_node.yaml | -| ZAPI | `perf-object-get-instances volume:node` | `nfs_write_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml | +| REST | `api/cluster/counter/tables/volume` | `total_read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `read_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | -### node_vol_nfs_write_latency +### node_volume_total_ops -Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency +Number of operations per second serviced by the volume. node_volume_total_ops is [volume_total_ops](#volume_total_ops) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/volume:node` | `nfs.write_latency`
Unit: microsec
Type: average
Base: nfs.write_ops | conf/restperf/9.12.0/volume_node.yaml | -| ZAPI | `perf-object-get-instances volume:node` | `nfs_write_latency`
Unit: microsec
Type: average
Base: nfs_write_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml | +| REST | `api/cluster/counter/tables/volume` | `total_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `total_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | -### node_vol_nfs_write_ops +### node_volume_write_data -Number of NFS write operations per second to the volume +Bytes written per second. node_volume_write_data is [volume_write_data](#volume_write_data) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/volume:node` | `nfs.write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume_node.yaml | -| ZAPI | `perf-object-get-instances volume:node` | `nfs_write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml | +| REST | `api/cluster/counter/tables/volume` | `bytes_written`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `write_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml |
+
+
-### node_vol_read_latency
+### node_volume_write_latency
 
-Average latency in microseconds for the WAFL filesystem to process read request to the volume; not including request processing or network communication time
+Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time. node_volume_write_latency is [volume_write_latency](#volume_write_latency) aggregated by `node`.
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `api/cluster/counter/tables/volume:node` | `write_latency`
Unit: microsec
Type: average
Base: total_read_ops | conf/restperf/9.12.0/volume_node.yaml | -| ZAPI | `perf-object-get-instances volume:node` | `read_latency`
Unit: microsec
Type: average
Base: read_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml | +| REST | `api/cluster/counter/tables/volume` | `write_latency`
Unit: microsec
Type: average
Base: total_write_ops | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `write_latency`
Unit: microsec
Type: average
Base: write_ops | conf/zapiperf/cdot/9.8.0/volume.yaml | -### node_vol_write_latency +### node_volume_write_ops -Average latency in microseconds for the WAFL filesystem to process write request to the volume; not including request processing or network communication time +Number of write operations per second to the volume. node_volume_write_ops is [volume_write_ops](#volume_write_ops) aggregated by `node`. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/volume:node` | `write_latency`
Unit: microsec
Type: average
Base: total_write_ops | conf/restperf/9.12.0/volume_node.yaml | -| ZAPI | `perf-object-get-instances volume:node` | `write_latency`
Unit: microsec
Type: average
Base: write_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml | +| REST | `api/cluster/counter/tables/volume` | `total_write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/volume.yaml | +| ZAPI | `perf-object-get-instances volume` | `write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/volume.yaml | ### nvme_lif_avg_latency @@ -7127,6 +8328,15 @@ Number of Upload Part operations. | ZAPI | `perf-object-get-instances object_store_server` | `upload_part_total`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml |
+### ontaps3_used_percent
+
+Percentage of the bucket's size that is currently in use, derived from the `logical_used_size` and `size` fields of the bucket.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| REST | `api/protocols/s3/buckets` | `logical_used_size, size` | conf/rest/9.7.0/ontap_s3.yaml |
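+
+ontaps3_used_percent is an example of a metric created by Harvest's builtin MetricAgent plugin: it is computed from the two source counters listed above rather than read from ONTAP. A hedged sketch of the kind of rule involved, assuming MetricAgent's `compute_metric` syntax and a `PERCENT` operation (the authoritative rule lives in conf/rest/9.7.0/ontap_s3.yaml):
+
+```yaml
+# Hypothetical compute_metric rule: publish used_percent as
+# 100 * logical_used_size / size for each bucket instance.
+plugins:
+  - MetricAgent:
+      compute_metric:
+        - used_percent PERCENT logical_used_size size
+```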
+
+
 ### path_read_data
 
 The average read throughput in kilobytes per second read from the indicated target port by the controller.
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
@@ -7207,6 +8417,176 @@ The average latency of I/O write operations sent from this controller to the ind
 | ZAPI | `perf-object-get-instances path` | `write_latency`
Unit: microsec
Type: average
Base: write_iops | conf/zapiperf/cdot/9.8.0/path.yaml | +### plex_disk_busy + +The utilization percent of the disk. plex_disk_busy is [disk_busy](#disk_busy) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `disk_busy_percent`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_busy`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml |
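+
+The `plex_disk_*` sections that follow, and the matching `raid_disk_*` sections later in the document, are likewise Aggregator-created: the same `disk:constituent` counters are re-published once per aggregation label. A hedged sketch of the disk template stanza, assuming one Aggregator rule per label (the authoritative rules live in conf/restperf/9.12.0/disk.yaml and conf/zapiperf/cdot/9.8.0/disk.yaml):
+
+```yaml
+# Hypothetical excerpt: one source object, several aggregation scopes,
+# each rule yielding a <label>_disk_* metric family.
+plugins:
+  - Aggregator:
+      - plex
+      - raid
+```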
+
+
+### plex_disk_capacity
+
+Disk capacity in MB. plex_disk_capacity is [disk_capacity](#disk_capacity) aggregated by `plex`.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| REST | `api/cluster/counter/tables/disk:constituent` | `capacity`
Unit: mb
Type: raw
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_capacity`
Unit: mb
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_cp_read_chain + +Average number of blocks transferred in each consistency point read operation during a CP. plex_disk_cp_read_chain is [disk_cp_read_chain](#disk_cp_read_chain) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_cp_read_latency + +Average latency per block in microseconds for consistency point read operations. plex_disk_cp_read_latency is [disk_cp_read_latency](#disk_cp_read_latency) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_cp_reads + +Number of disk read operations initiated each second for consistency point processing. plex_disk_cp_reads is [disk_cp_reads](#disk_cp_reads) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_io_pending + +Average number of I/Os issued to the disk for which we have not yet received the response. plex_disk_io_pending is [disk_io_pending](#disk_io_pending) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_io_queued + +Number of I/Os queued to the disk but not yet issued. plex_disk_io_queued is [disk_io_queued](#disk_io_queued) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_total_data + +Total throughput for user operations per second. plex_disk_total_data is [disk_total_data](#disk_total_data) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_total_transfers + +Total number of disk operations involving data transfer initiated per second. plex_disk_total_transfers is [disk_total_transfers](#disk_total_transfers) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_transfer_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_transfers`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_user_read_blocks + +Number of blocks transferred for user read operations per second. plex_disk_user_read_blocks is [disk_user_read_blocks](#disk_user_read_blocks) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_user_read_chain + +Average number of blocks transferred in each user read operation. plex_disk_user_read_chain is [disk_user_read_chain](#disk_user_read_chain) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_user_read_latency + +Average latency per block in microseconds for user read operations. plex_disk_user_read_latency is [disk_user_read_latency](#disk_user_read_latency) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_user_reads + +Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. plex_disk_user_reads is [disk_user_reads](#disk_user_reads) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_user_write_blocks + +Number of blocks transferred for user write operations per second. plex_disk_user_write_blocks is [disk_user_write_blocks](#disk_user_write_blocks) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_user_write_chain + +Average number of blocks transferred in each user write operation. plex_disk_user_write_chain is [disk_user_write_chain](#disk_user_write_chain) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_write_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_user_write_latency + +Average latency per block in microseconds for user write operations. plex_disk_user_write_latency is [disk_user_write_latency](#disk_user_write_latency) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### plex_disk_user_writes + +Number of disk write operations initiated each second for storing data or metadata associated with user requests. plex_disk_user_writes is [disk_user_writes](#disk_user_writes) aggregated by `plex`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_writes`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + ### qos_concurrency This is the average number of concurrent requests for the workload. @@ -7524,6 +8904,176 @@ Disk space threshold, in kilobytes, for the quota target. The value is -1 if the | ZAPI | `quota-report-iter` | `threshold` | conf/zapi/cdot/9.8.0/qtree.yaml | +### raid_disk_busy + +The utilization percent of the disk. raid_disk_busy is [disk_busy](#disk_busy) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `disk_busy_percent`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_busy`
Unit: percent
Type: percent
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_capacity + +Disk capacity in MB. raid_disk_capacity is [disk_capacity](#disk_capacity) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `capacity`
Unit: mb
Type: raw
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `disk_capacity`
Unit: mb
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_cp_read_chain + +Average number of blocks transferred in each consistency point read operation during a CP. raid_disk_cp_read_chain is [disk_cp_read_chain](#disk_cp_read_chain) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_chain`
Unit: none
Type: average
Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_cp_read_latency + +Average latency per block in microseconds for consistency point read operations. raid_disk_cp_read_latency is [disk_cp_read_latency](#disk_cp_read_latency) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_read_latency`
Unit: microsec
Type: average
Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_cp_reads + +Number of disk read operations initiated each second for consistency point processing. raid_disk_cp_reads is [disk_cp_reads](#disk_cp_reads) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `cp_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `cp_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_io_pending + +Average number of I/Os issued to the disk for which we have not yet received the response. raid_disk_io_pending is [disk_io_pending](#disk_io_pending) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_pending`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_io_queued + +Number of I/Os queued to the disk but not yet issued. raid_disk_io_queued is [disk_io_queued](#disk_io_queued) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `io_queued`
Unit: none
Type: average
Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_total_data + +Total throughput for user operations per second. raid_disk_total_data is [disk_total_data](#disk_total_data) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_total_transfers + +Total number of disk operations involving data transfer initiated per second. raid_disk_total_transfers is [disk_total_transfers](#disk_total_transfers) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `total_transfer_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `total_transfers`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_user_read_blocks + +Number of blocks transferred for user read operations per second. raid_disk_user_read_blocks is [disk_user_read_blocks](#disk_user_read_blocks) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_user_read_chain + +Average number of blocks transferred in each user read operation. raid_disk_user_read_chain is [disk_user_read_chain](#disk_user_read_chain) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_read_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_chain`
Unit: none
Type: average
Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_user_read_latency + +Average latency per block in microseconds for user read operations. raid_disk_user_read_latency is [disk_user_read_latency](#disk_user_read_latency) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_read_latency`
Unit: microsec
Type: average
Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_user_reads + +Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. raid_disk_user_reads is [disk_user_reads](#disk_user_reads) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_read_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_reads`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_user_write_blocks + +Number of blocks transferred for user write operations per second. raid_disk_user_write_blocks is [disk_user_write_blocks](#disk_user_write_blocks) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_block_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_blocks`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_user_write_chain + +Average number of blocks transferred in each user write operation. raid_disk_user_write_chain is [disk_user_write_chain](#disk_user_write_chain) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_write_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_chain`
Unit: none
Type: average
Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_user_write_latency + +Average latency per block in microseconds for user write operations. raid_disk_user_write_latency is [disk_user_write_latency](#disk_user_write_latency) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_write_latency`
Unit: microsec
Type: average
Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml | + + +### raid_disk_user_writes + +Number of disk write operations initiated each second for storing data or metadata associated with user requests. raid_disk_user_writes is [disk_user_writes](#disk_user_writes) aggregated by `raid`. + +| API | Endpoint | Metric | Template | +|--------|----------|--------|---------| +| REST | `api/cluster/counter/tables/disk:constituent` | `user_write_count`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/disk.yaml | +| ZAPI | `perf-object-get-instances disk:constituent` | `user_writes`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/disk.yaml | + + ### security_audit_destination_port The destination port used to forward the message. @@ -8086,7 +9636,7 @@ Number of connections | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `connections`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `connections`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `connections`
Unit: none
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8096,7 +9646,7 @@ Number of established SMB and SMB2 sessions | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `established_sessions`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `established_sessions`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `established_sessions`
Unit: none
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8106,7 +9656,7 @@ Average latency for CIFS operations | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `latency`
Unit: microsec
Type: average
Base: latency_base | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `latency`
Unit: microsec
Type: average
Base: latency_base | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `cifs_latency`
Unit: microsec
Type: average
Base: cifs_latency_base | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8116,7 +9666,7 @@ Array of select CIFS operation counts | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `op_count`
Unit: none
Type: rate
Base: | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `op_count`
Unit: none
Type: rate
Base: | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `cifs_op_count`
Unit: none
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8126,7 +9676,7 @@ Number of open files over SMB and SMB2 | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `open_files`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `open_files`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `open_files`
Unit: none
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8136,7 +9686,7 @@ Total number of CIFS operations | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `total_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `total_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `cifs_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8146,7 +9696,7 @@ Average latency for CIFS read operations | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `average_read_latency`
Unit: microsec
Type: average
Base: total_read_ops | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `average_read_latency`
Unit: microsec
Type: average
Base: total_read_ops | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `cifs_read_latency`
Unit: microsec
Type: average
Base: cifs_read_ops | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8156,7 +9706,7 @@ Total number of CIFS read operations | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `total_read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `total_read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `cifs_read_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8166,7 +9716,7 @@ Number of signed SMB and SMB2 sessions. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `signed_sessions`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `signed_sessions`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `signed_sessions`
Unit: none
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8176,7 +9726,7 @@ Average latency for CIFS write operations | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `average_write_latency`
Unit: microsec
Type: average
Base: total_write_ops | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `average_write_latency`
Unit: microsec
Type: average
Base: total_write_ops | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `cifs_write_latency`
Unit: microsec
Type: average
Base: cifs_write_ops | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml | @@ -8186,7 +9736,7 @@ Total number of CIFS write operations | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `api/cluster/counter/tables/svm_cifs` | `total_write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/cifs_svm.yaml | +| REST | `api/cluster/counter/tables/svm_cifs` | `total_write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/cifs_vserver.yaml | | ZAPI | `perf-object-get-instances cifs:vserver` | `cifs_write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml |
@@ -9886,8 +11436,8 @@ volume_inode_files_used / volume_inode_total
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/volume.yaml |
-| ZAPI | `NA` | `Harvest generated` | conf/zapi/cdot/9.8.0/volume.yaml |
+| REST | `api/storage/volumes` | `inode_files_used, inode_files_total` | conf/rest/9.9.0/volume.yaml |
+| ZAPI | `volume-get-iter` | `inode_files_used, inode_files_total` | conf/zapi/cdot/9.8.0/volume.yaml |
 
 
 ### volume_nfs_access_latency
@@ -10086,8 +11636,8 @@ amount of storage space that is currently available for overwrites, calculated b
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/volume.yaml |
-| ZAPI | `NA` | `Harvest generated` | conf/zapi/cdot/9.8.0/volume.yaml |
+| REST | `api/storage/volumes` | `overwrite_reserve_total, overwrite_reserve_used` | conf/rest/9.9.0/volume.yaml |
+| ZAPI | `volume-get-iter` | `overwrite_reserve_total, overwrite_reserve_used` | conf/zapi/cdot/9.8.0/volume.yaml |
 
 
 ### volume_overwrite_reserve_total
@@ -10296,8 +11846,8 @@ amount of storage space currently used by a volume's snapshot reserve, which is
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `NA` | `Harvest generated` | conf/rest/9.12.0/volume.yaml |
-| ZAPI | `NA` | `Harvest generated` | conf/zapi/cdot/9.8.0/volume.yaml |
+| REST | `api/storage/volumes` | `snapshot_reserve_size, snapshot_reserve_available` | conf/rest/9.9.0/volume.yaml |
+| ZAPI | `volume-get-iter` | `snapshot_reserve_size, snapshot_reserve_available` | conf/zapi/cdot/9.8.0/volume.yaml |
 
 
 ### volume_snapshot_reserve_used_percent

From b5ea9653f443eedb06acca93f0f1cdbf76d48b8e Mon Sep 17 00:00:00 2001
From: Chris Grindstaff
Date: Wed, 11 Oct 2023 10:09:03 -0400
Subject: [PATCH 2/2] doc: `generate metrics` should include metrics Created
 by builtin plugins: Aggregator, Max, and MetricAgent

fix: resource_headroom templates should use `api` prefixed query
---
 cmd/tools/template/template.go                |  4 +-
 cmd/tools/template/template_test.go           |  8 +++
 .../9.12.0/resource_headroom_aggr.yaml        |  2 +-
 .../9.12.0/resource_headroom_cpu.yaml         |  2 +-
 docs/ontap-metrics.md                         | 54 +++++++++----------
 5 files changed, 39 insertions(+), 31 deletions(-)

diff --git a/cmd/tools/template/template.go b/cmd/tools/template/template.go
index 45eaf8d05..297a47e17 100644
--- a/cmd/tools/template/template.go
+++ b/cmd/tools/template/template.go
@@ -215,13 +215,13 @@ func flattenCounters(n *y3.Node, metrics *[]Metric, parents []string) {
 			flattenCounters(c, metrics, parents)
 		}
 	case "!!str":
-		*metrics = append(*metrics, newZapiMetric(n, parents))
+		*metrics = append(*metrics, newMetric(n, parents))
 	}
 }
 
 var sigilReplacer = strings.NewReplacer("^", "", "- ", "")
 
-func newZapiMetric(n *y3.Node, parents []string) Metric {
+func newMetric(n *y3.Node, parents []string) Metric {
 	// separate left and right and remove all sigils
 	text := n.Value
 	noSigils := sigilReplacer.Replace(text)
diff --git a/cmd/tools/template/template_test.go b/cmd/tools/template/template_test.go
index 3a94a5ab2..7241373a2 100644
--- a/cmd/tools/template/template_test.go
+++ b/cmd/tools/template/template_test.go
@@ -352,6 +352,14 @@ func (m Metric) pathString() string {
 	return strings.Join(m.parents, "/") + "/" + m.left
 }
 
+func TestQueryPrefix(t *testing.T) {
+	visitTemplates(t, func(path string, model Model) {
+		if 
!strings.HasPrefix(model.Query, "api/") {
+			t.Errorf("query should be prefixed with api/, got=%s path=[%s]", model.Query, shortPath(path))
+		}
+	}, []string{"rest", "restperf"}...)
+}
+
 // Tests that keys and metrics are sorted in the following order:
 // - double hats (alphabetically)
 // - single hats (alphabetically)
diff --git a/conf/restperf/9.12.0/resource_headroom_aggr.yaml b/conf/restperf/9.12.0/resource_headroom_aggr.yaml
index dae9ad6f2..7dc1150f7 100644
--- a/conf/restperf/9.12.0/resource_headroom_aggr.yaml
+++ b/conf/restperf/9.12.0/resource_headroom_aggr.yaml
@@ -1,5 +1,5 @@
 name: HeadroomAggr
-query: /api/cluster/counter/tables/headroom_aggregate
+query: api/cluster/counter/tables/headroom_aggregate
 object: headroom_aggr
 
 counters:
diff --git a/conf/restperf/9.12.0/resource_headroom_cpu.yaml b/conf/restperf/9.12.0/resource_headroom_cpu.yaml
index 799766fec..6e6ffa64e 100644
--- a/conf/restperf/9.12.0/resource_headroom_cpu.yaml
+++ b/conf/restperf/9.12.0/resource_headroom_cpu.yaml
@@ -1,5 +1,5 @@
 name: HeadroomCPU
-query: /api/cluster/counter/tables/headroom_cpu
+query: api/cluster/counter/tables/headroom_cpu
 object: headroom_cpu
 
 counters:
diff --git a/docs/ontap-metrics.md b/docs/ontap-metrics.md
index 904650fb3..fa78fd696 100644
--- a/docs/ontap-metrics.md
+++ b/docs/ontap-metrics.md
@@ -7,7 +7,7 @@ These can be generated on demand by running `bin/harvest grafana metrics`. See
 - More information about ONTAP REST performance counters can be found [here](https://docs.netapp.com/us-en/ontap-pcmap-9121/index.html).
 ```
-Creation Date : 2023-Oct-05
+Creation Date : 2023-Oct-11
 ONTAP Version: 9.13.1
 ```
 ## Understanding the structure
@@ -2595,7 +2595,7 @@ This is the storage aggregate average latency per message at the disk level.
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_aggregate` | `current_latency`
Unit: microsec
Type: average
Base: current_ops | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `current_latency`
Unit: microsec
Type: average
Base: current_ops | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `current_latency`
Unit: microsec
Type: average
Base: current_ops | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2605,7 +2605,7 @@ Total number of I/Os processed by the aggregate per second. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_aggregate` | `current_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `current_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `current_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2615,7 +2615,7 @@ This is the storage aggregate average utilization of all the data disks in the a | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_aggregate` | `current_utilization`
Unit: percent
Type: percent
Base: current_utilization_denominator | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `current_utilization`
Unit: percent
Type: percent
Base: current_utilization_denominator | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `current_utilization`
Unit: percent
Type: percent
Base: current_utilization_total | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2625,7 +2625,7 @@ Daily exponential weighted moving average. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_aggregate` | `ewma.daily`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `ewma.daily`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `ewma_daily`
Unit: none
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml |
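
The four `headroom_aggr_ewma_*` metrics (daily, hourly, monthly, and weekly) are exponential weighted moving averages computed by ONTAP over different horizons. As a reminder of the general form only, with the smoothing factor chosen by ONTAP and not documented here:

$$\mathrm{EWMA}_t = \alpha\, x_t + (1 - \alpha)\, \mathrm{EWMA}_{t-1}, \qquad 0 < \alpha < 1$$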
@@ -2635,7 +2635,7 @@ Hourly exponential weighted moving average.
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_aggregate` | `ewma.hourly`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `ewma.hourly`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `ewma_hourly`
Unit: none
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2645,7 +2645,7 @@ Monthly exponential weighted moving average. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_aggregate` | `ewma.monthly`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `ewma.monthly`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `ewma_monthly`
Unit: none
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2655,7 +2655,7 @@ Weekly exponential weighted moving average. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_aggregate` | `ewma.weekly`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `ewma.weekly`
Unit: none
Type: raw
Base: | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `ewma_weekly`
Unit: none
Type: raw
Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2665,7 +2665,7 @@ The confidence factor for the optimal point value based on the observed resource | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_aggregate` | `optimal_point.confidence_factor`
Unit: none
Type: average
Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `optimal_point.confidence_factor`
Unit: none
Type: average
Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `optimal_point_confidence_factor`
Unit: none
Type: average
Base: optimal_point_samples | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2675,7 +2675,7 @@ The latency component of the optimal point of the latency/utilization curve. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_aggregate` | `optimal_point.latency`
Unit: microsec
Type: average
Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `optimal_point.latency`
Unit: microsec
Type: average
Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `optimal_point_latency`
Unit: microsec
Type: average
Base: optimal_point_samples | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml |
@@ -2685,7 +2685,7 @@ The ops component of the optimal point derived from the latency/utilization curve
 
 | API | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_aggregate` | `optimal_point.ops`
Unit: per_sec
Type: average
Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `optimal_point.ops`
Unit: per_sec
Type: average
Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `optimal_point_ops`
Unit: per_sec
Type: average
Base: optimal_point_samples | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2695,7 +2695,7 @@ The utilization component of the optimal point of the latency/utilization curve. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_aggregate` | `optimal_point.utilization`
Unit: none
Type: average
Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_aggr.yaml | +| REST | `api/cluster/counter/tables/headroom_aggregate` | `optimal_point.utilization`
Unit: none
Type: average
Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_aggr.yaml | | ZAPI | `perf-object-get-instances resource_headroom_aggr` | `optimal_point_utilization`
Unit: none
Type: average
Base: optimal_point_samples | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml | @@ -2705,7 +2705,7 @@ Current operation latency of the resource. | API | Endpoint | Metric | Template | |--------|----------|--------|---------| -| REST | `/api/cluster/counter/tables/headroom_cpu` | `current_latency`
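Counters of `Type: average` are not directly plottable: the raw value is a running accumulator that must be divided by the change in its `Base` counter between two polls. Below is a minimal sketch of that arithmetic with hypothetical sample values; it illustrates the counter semantics only and is not Harvest's actual implementation.

```go
package main

import "fmt"

// averageFromBase cooks a perf counter of Type "average": the raw counter's
// delta between two polls is divided by the delta of its Base counter.
func averageFromBase(curRaw, prevRaw, curBase, prevBase float64) float64 {
	baseDelta := curBase - prevBase
	if baseDelta == 0 {
		return 0 // no new samples since the last poll
	}
	return (curRaw - prevRaw) / baseDelta
}

func main() {
	// Hypothetical optimal_point.latency poll pair: the raw counter is a
	// running sum of microseconds; optimal_point.samples is its Base.
	latency := averageFromBase(9_000_000, 8_400_000, 3_000, 2_800)
	fmt.Printf("optimal_point.latency ≈ %.0f microsec\n", latency) // 3000
}
```

The `percent` type seen below works the same way, with the quotient scaled by 100 (for example, `current_utilization` over `elapsed_time`).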
@@ -2705,7 +2705,7 @@ Current operation latency of the resource.

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `current_latency`<br>Unit: microsec<br>Type: average<br>Base: current_ops | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `current_latency`<br>Unit: microsec<br>Type: average<br>Base: current_ops | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `current_latency`<br>Unit: microsec<br>Type: average<br>Base: current_ops | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2715,7 +2715,7 @@ Total number of operations per second (also referred to as dblade ops).

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `current_ops`<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `current_ops`<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `current_ops`<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2725,7 +2725,7 @@ Average processor utilization across all processors in the system.

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `current_utilization`<br>Unit: percent<br>Type: percent<br>Base: elapsed_time | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `current_utilization`<br>Unit: percent<br>Type: percent<br>Base: elapsed_time | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `current_utilization`<br>Unit: percent<br>Type: percent<br>Base: current_utilization_total | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2735,7 +2735,7 @@ Daily exponential weighted moving average for current_ops, optimal_point_ops, cu

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `ewma.daily`<br>Unit: none<br>Type: raw<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `ewma.daily`<br>Unit: none<br>Type: raw<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `ewma_daily`<br>Unit: none<br>Type: raw<br>Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2745,7 +2745,7 @@ Hourly exponential weighted moving average for current_ops, optimal_point_ops, c

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `ewma.hourly`<br>Unit: none<br>Type: raw<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `ewma.hourly`<br>Unit: none<br>Type: raw<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `ewma_hourly`<br>Unit: none<br>Type: raw<br>Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2755,7 +2755,7 @@ Monthly exponential weighted moving average for current_ops, optimal_point_ops,

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `ewma.monthly`<br>Unit: none<br>Type: raw<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `ewma.monthly`<br>Unit: none<br>Type: raw<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `ewma_monthly`<br>Unit: none<br>Type: raw<br>Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2765,7 +2765,7 @@ Weekly exponential weighted moving average for current_ops, optimal_point_ops, c

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `ewma.weekly`<br>Unit: none<br>Type: raw<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `ewma.weekly`<br>Unit: none<br>Type: raw<br>Base: | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `ewma_weekly`<br>Unit: none<br>Type: raw<br>Base: | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2775,7 +2775,7 @@ Confidence factor for the optimal point value based on the observed resource lat

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `optimal_point.confidence_factor`<br>Unit: none<br>Type: average<br>Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `optimal_point.confidence_factor`<br>Unit: none<br>Type: average<br>Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `optimal_point_confidence_factor`<br>Unit: none<br>Type: average<br>Base: optimal_point_samples | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2785,7 +2785,7 @@ Latency component of the optimal point of the latency/utilization curve. This co

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `optimal_point.latency`<br>Unit: microsec<br>Type: average<br>Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `optimal_point.latency`<br>Unit: microsec<br>Type: average<br>Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `optimal_point_latency`<br>Unit: microsec<br>Type: average<br>Base: optimal_point_samples | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2795,7 +2795,7 @@ Ops component of the optimal point derived from the latency/utilization curve. T

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `optimal_point.ops`<br>Unit: per_sec<br>Type: average<br>Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `optimal_point.ops`<br>Unit: per_sec<br>Type: average<br>Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `optimal_point_ops`<br>Unit: per_sec<br>Type: average<br>Base: optimal_point_samples | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |

@@ -2805,7 +2805,7 @@ Utilization component of the optimal point of the latency/utilization curve. Thi

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `/api/cluster/counter/tables/headroom_cpu` | `optimal_point.utilization`<br>Unit: none<br>Type: average<br>Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
+| REST | `api/cluster/counter/tables/headroom_cpu` | `optimal_point.utilization`<br>Unit: none<br>Type: average<br>Base: optimal_point.samples | conf/restperf/9.12.0/resource_headroom_cpu.yaml |
 | ZAPI | `perf-object-get-instances resource_headroom_cpu` | `optimal_point_utilization`<br>Unit: none<br>Type: average<br>Base: optimal_point_samples | conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml |
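The REST counter tables referenced above can also be inspected directly on clusters that expose the REST perf API (ONTAP 9.11.1 or later) by listing a table's `rows`. A minimal sketch using only Go's standard library follows; the cluster address, credentials, and the relaxed TLS verification are placeholders for a lab setup, not recommended settings.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical cluster address; each row of a counter table is one instance.
	url := "https://cluster.example.com/api/cluster/counter/tables/headroom_cpu/rows?fields=counters"

	// Lab clusters often use self-signed certificates; verify properly in production.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("admin", "password") // placeholder credentials

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON records containing the raw counters
}
```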
@@ -4816,7 +4816,7 @@ Total number of NFSv4.2 BIND_CONN_TO_SESSION operations.

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `api/cluster/counter/tables/svm_nfs_v42:node` | `bind_conn_to_session.total`<br>Unit: none<br>Type: delta<br>Base: | conf/restperf/9.12.0/nfsv4_2_node.yaml |
+| REST | `api/cluster/counter/tables/svm_nfs_v42:node` | `bind_conn_to_session.total`<br>Unit: none<br>Type: rate<br>Base: | conf/restperf/9.12.0/nfsv4_2_node.yaml |
 | ZAPI | `perf-object-get-instances nfsv4_1:node` | `bind_conn_to_session_total`<br>Unit: none<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml |

@@ -5636,7 +5636,7 @@ Total number of ReadSymLink procedure requests. It is the total number of read s

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `api/cluster/counter/tables/svm_nfs_v3:node` | `read_symlink.total`<br>Unit: none<br>Type: delta<br>Base: | conf/restperf/9.12.0/nfsv3_node.yaml |
+| REST | `api/cluster/counter/tables/svm_nfs_v3:node` | `read_symlink.total`<br>Unit: none<br>Type: rate<br>Base: | conf/restperf/9.12.0/nfsv3_node.yaml |
 | ZAPI | `perf-object-get-instances nfsv3:node` | `read_symlink_total`<br>Unit: none<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml |

@@ -9796,7 +9796,7 @@ Total number of NFSv4.2 BIND_CONN_TO_SESSION operations.

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `api/cluster/counter/tables/svm_nfs_v42` | `bind_conn_to_session.total`<br>Unit: none<br>Type: delta<br>Base: | conf/restperf/9.12.0/nfsv4_2.yaml |
+| REST | `api/cluster/counter/tables/svm_nfs_v42` | `bind_conn_to_session.total`<br>Unit: none<br>Type: rate<br>Base: | conf/restperf/9.12.0/nfsv4_2.yaml |
 | ZAPI | `perf-object-get-instances nfsv4_1` | `bind_conn_to_session_total`<br>Unit: none<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml |

@@ -10616,7 +10616,7 @@ Total number of ReadSymLink procedure requests. It is the total number of read s

 | API    | Endpoint | Metric | Template |
 |--------|----------|--------|---------|
-| REST | `api/cluster/counter/tables/svm_nfs_v3` | `read_symlink.total`<br>Unit: none<br>Type: delta<br>Base: | conf/restperf/9.12.0/nfsv3.yaml |
+| REST | `api/cluster/counter/tables/svm_nfs_v3` | `read_symlink.total`<br>Unit: none<br>Type: rate<br>Base: | conf/restperf/9.12.0/nfsv3.yaml |
 | ZAPI | `perf-object-get-instances nfsv3` | `read_symlink_total`<br>Unit: none<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/nfsv3.yaml |
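As the tables above show, these totals are `rate` counters: a `delta` counter reports the raw change between two polls, while a `rate` divides that change by the elapsed time to yield a per-second value, which is what lines them up with their per-second ZAPI counterparts. A minimal sketch with hypothetical samples, illustrating the semantics rather than Harvest's implementation:

```go
package main

import (
	"fmt"
	"time"
)

// delta is the raw change in a monotonically increasing counter between polls.
func delta(cur, prev float64) float64 { return cur - prev }

// rate normalizes the delta by the elapsed time, yielding a per-second value.
func rate(cur, prev float64, elapsed time.Duration) float64 {
	if elapsed <= 0 {
		return 0
	}
	return delta(cur, prev) / elapsed.Seconds()
}

func main() {
	// Hypothetical read_symlink.total samples taken 60 seconds apart.
	fmt.Println(delta(10_600, 10_000))                // 600 operations
	fmt.Println(rate(10_600, 10_000, 60*time.Second)) // 10 ops per second
}
```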