diff --git a/cmd/tools/grafana/dashboard_test.go b/cmd/tools/grafana/dashboard_test.go
index b3eea70e0..d89f9831b 100644
--- a/cmd/tools/grafana/dashboard_test.go
+++ b/cmd/tools/grafana/dashboard_test.go
@@ -1713,6 +1713,24 @@ func checkVariablesAreFSxFriendly(t *testing.T, path string, data []byte) {
})
}
+func TestLabelsNullVariables(t *testing.T) {
+ VisitDashboards(cDotDashboards,
+ func(path string, data []byte) {
+ checkVariablesLabelNull(t, path, data)
+ })
+}
+
+func checkVariablesLabelNull(t *testing.T, path string, data []byte) {
+ gjson.GetBytes(data, "templating.list").ForEach(func(key, value gjson.Result) bool {
+ if value.Get("label").Type != gjson.Null && value.Get("label").ClonedString() == "" {
+ varName := value.Get("name").ClonedString()
+ t.Errorf("dashboard=%s path=templating.list[%s]. variable=%s label should not be empty",
+ ShortPath(path), key.ClonedString(), varName)
+ }
+ return true
+ })
+}
+
var linkPath = regexp.MustCompile(`/d/(.*?)/`)
var supportedLinkedObjects = []string{"cluster", "datacenter", "aggr", "svm", "volume", "node", "qtree", "home_node", "tenant"}
var exceptionPathPanelObject = []string{
diff --git a/conf/keyperf/9.15.0/lun.yaml b/conf/keyperf/9.15.0/lun.yaml
new file mode 100644
index 000000000..a2b8ddb4c
--- /dev/null
+++ b/conf/keyperf/9.15.0/lun.yaml
@@ -0,0 +1,39 @@
+name: Lun
+query: api/storage/luns
+object: lun
+
+counters:
+ - ^^uuid => uuid
+ - ^location.volume.name => volume
+ - ^name => path
+ - ^statistics.status => status
+ - ^svm.name => svm
+ - statistics.iops_raw.other => other_ops
+ - statistics.iops_raw.read => read_ops
+ - statistics.iops_raw.total => total_ops
+ - statistics.iops_raw.write => write_ops
+ - statistics.latency_raw.other => other_latency
+ - statistics.latency_raw.read => avg_read_latency
+ - statistics.latency_raw.total => total_latency
+ - statistics.latency_raw.write => avg_write_latency
+ - statistics.throughput_raw.other => other_data
+ - statistics.throughput_raw.read => read_data
+ - statistics.throughput_raw.total => total_data
+ - statistics.throughput_raw.write => write_data
+ - statistics.timestamp(timestamp) => timestamp
+ - hidden_fields:
+ - statistics
+ - filter:
+ - statistics.timestamp=!"-"
+
+plugins:
+ LabelAgent:
+ split_regex:
+ - path `^/[^/]+/([^/]+)(?:/.*?|)/([^/]+)$` volume,lun
+ - path `^([^/]+)$` lun
+
+export_options:
+ instance_keys:
+ - lun
+ - svm
+ - volume
\ No newline at end of file
diff --git a/conf/keyperf/9.15.0/namespace.yaml b/conf/keyperf/9.15.0/namespace.yaml
new file mode 100644
index 000000000..02b176123
--- /dev/null
+++ b/conf/keyperf/9.15.0/namespace.yaml
@@ -0,0 +1,37 @@
+name: Namespace
+query: api/storage/namespaces
+object: namespace
+
+counters:
+ - ^^uuid => uuid
+ - ^name => path
+ - ^statistics.status => status
+ - ^svm.name => svm
+ - statistics.iops_raw.other => other_ops
+ - statistics.iops_raw.read => read_ops
+ - statistics.iops_raw.total => total_ops
+ - statistics.iops_raw.write => write_ops
+ - statistics.latency_raw.other => avg_other_latency
+ - statistics.latency_raw.read => avg_read_latency
+ - statistics.latency_raw.total => avg_total_latency
+ - statistics.latency_raw.write => avg_write_latency
+ - statistics.throughput_raw.read => read_data
+ - statistics.throughput_raw.total => total_data
+ - statistics.throughput_raw.write => write_data
+ - statistics.timestamp(timestamp) => timestamp
+ - hidden_fields:
+ - statistics
+ - filter:
+ - statistics.timestamp=!"-"
+
+plugins:
+ LabelAgent:
+ split:
+ - path `/` ,,volume,namespace
+
+export_options:
+ instance_keys:
+ - namespace
+ - path
+ - svm
+ - volume
\ No newline at end of file
diff --git a/conf/keyperf/default.yaml b/conf/keyperf/default.yaml
index 75145fa96..0d970530c 100644
--- a/conf/keyperf/default.yaml
+++ b/conf/keyperf/default.yaml
@@ -11,6 +11,8 @@ objects:
Cluster: cluster.yaml
FlexCache: flexcache.yaml
LIF: lif.yaml
+ Lun: lun.yaml
+ Namespace: namespace.yaml
NFSv3: nfsv3.yaml
NFSv41: nfsv4_1.yaml
NFSv4: nfsv4.yaml
diff --git a/conf/rest/9.12.0/snapshotpolicy.yaml b/conf/rest/9.12.0/snapshotpolicy.yaml
index be0963694..9a5ccc189 100644
--- a/conf/rest/9.12.0/snapshotpolicy.yaml
+++ b/conf/rest/9.12.0/snapshotpolicy.yaml
@@ -1,5 +1,5 @@
name: SnapshotPolicy
-query: api/storage/snapshot-policies
+query: api/private/cli/volume/snapshot/policy
object: snapshot_policy
counters:
@@ -14,7 +14,7 @@ counters:
plugins:
- SnapshotPolicy
-
+
export_options:
instance_keys:
- snapshot_policy
diff --git a/docs/configure-harvest-basic.md b/docs/configure-harvest-basic.md
index d74c84086..a392ef000 100644
--- a/docs/configure-harvest-basic.md
+++ b/docs/configure-harvest-basic.md
@@ -293,6 +293,9 @@ At runtime, Harvest will invoke the script specified in the `credentials_script`
The script should communicate the credentials to Harvest by writing the response to its standard output (stdout).
Harvest supports two output formats from the script: YAML and plain text.
+When running Harvest inside a container, tools like `jq` and `curl` are not available.
+In such cases, you can use a Go binary as a credential script to fetch authentication information. For details on using a Go binary as a credential script for Harvest container deployment, please refer to the [GitHub discussion](https://github.com/NetApp/harvest/discussions/3380).
+
### YAML format
If the script outputs a YAML object with `username` and `password` keys, Harvest will use both the `username` and `password` from the output. For example, if the script writes the following, Harvest will use `myuser` and `mypassword` for the poller's credentials.
diff --git a/docs/ontap-metrics.md b/docs/ontap-metrics.md
index 2a81282b0..311683e61 100644
--- a/docs/ontap-metrics.md
+++ b/docs/ontap-metrics.md
@@ -455,7 +455,7 @@ Number of user-visible files used in the referenced file system. If the referenc
### aggr_inode_inodefile_private_capacity
-Number of files that can currently be stored on disk for system metadata files. This number will dynamically increase as more system files are created.This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.
+Number of files that can currently be stored on disk for system metadata files. This number will dynamically increase as more system files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -1031,7 +1031,7 @@ The aggregate's volume count, which includes both FlexVols and FlexGroup constit
### aggr_write_data
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -1040,7 +1040,7 @@ Performance metric for write I/O operations.
### aggr_write_latency
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -1049,7 +1049,7 @@ Performance metric for write I/O operations.
### aggr_write_ops
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -1245,7 +1245,7 @@ Performance metric aggregated over all types of I/O operations.
### cluster_write_data
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -1254,7 +1254,7 @@ Performance metric for write I/O operations.
### cluster_write_latency
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -1263,7 +1263,7 @@ Performance metric for write I/O operations.
### cluster_write_ops
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -3683,6 +3683,7 @@ Average read latency in microseconds for all operations on the LUN
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/lun` | `average_read_latency`
Unit: microsec
Type: average
Base: read_ops | conf/restperf/9.12.0/lun.yaml |
+| KeyPerf | `api/storage/luns` | `statistics.latency_raw.read`
Unit: microsec
Type: average
Base: lun_statistics.iops_raw.read | conf/keyperf/9.15.0/lun.yaml |
| ZAPI | `perf-object-get-instances lun` | `avg_read_latency`
Unit: microsec
Type: average
Base: read_ops | conf/zapiperf/cdot/9.8.0/lun.yaml |
@@ -3693,6 +3694,7 @@ Average write latency in microseconds for all operations on the LUN
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/lun` | `average_write_latency`
Unit: microsec
Type: average
Base: write_ops | conf/restperf/9.12.0/lun.yaml |
+| KeyPerf | `api/storage/luns` | `statistics.latency_raw.write`
Unit: microsec
Type: average
Base: lun_statistics.iops_raw.write | conf/keyperf/9.15.0/lun.yaml |
| ZAPI | `perf-object-get-instances lun` | `avg_write_latency`
Unit: microsec
Type: average
Base: write_ops | conf/zapiperf/cdot/9.8.0/lun.yaml |
@@ -3736,6 +3738,33 @@ This metric indicates a value of 1 if the LUN state is online (indicating the LU
| ZAPI | `NA` | `Harvest generated` | conf/zapi/cdot/9.8.0/lun.yaml |
+### lun_other_data
+
+Performance metric for other I/O operations. Other I/O operations can be metadata operations, such as directory lookups and so on.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/luns` | `statistics.throughput_raw.other`
Unit: b_per_sec
Type: rate
Base: | conf/keyperf/9.15.0/lun.yaml |
+
+
+### lun_other_latency
+
+Performance metric for other I/O operations. Other I/O operations can be metadata operations, such as directory lookups and so on.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/luns` | `statistics.latency_raw.other`
Unit: microsec
Type: average
Base: lun_statistics.iops_raw.other | conf/keyperf/9.15.0/lun.yaml |
+
+
+### lun_other_ops
+
+Performance metric for other I/O operations. Other I/O operations can be metadata operations, such as directory lookups and so on.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/luns` | `statistics.iops_raw.other`
Unit: per_sec
Type: rate
Base: | conf/keyperf/9.15.0/lun.yaml |
+
+
### lun_queue_full
Queue full responses
@@ -3763,6 +3792,7 @@ Read bytes
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/lun` | `read_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/lun.yaml |
+| KeyPerf | `api/storage/luns` | `statistics.throughput_raw.read`
Unit: b_per_sec
Type: rate
Base: | conf/keyperf/9.15.0/lun.yaml |
| ZAPI | `perf-object-get-instances lun` | `read_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/lun.yaml |
@@ -3773,6 +3803,7 @@ Number of read operations
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/lun` | `read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/lun.yaml |
+| KeyPerf | `api/storage/luns` | `statistics.iops_raw.read`
Unit: per_sec
Type: rate
Base: | conf/keyperf/9.15.0/lun.yaml |
| ZAPI | `perf-object-get-instances lun` | `read_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/lun.yaml |
@@ -3836,6 +3867,33 @@ This metric represents the percentage of a LUN that is currently being used.
| ZAPI | `lun-get-iter` | `size_used, size` | conf/zapi/cdot/9.8.0/lun.yaml |
+### lun_total_data
+
+Performance metric aggregated over all types of I/O operations.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/luns` | `statistics.throughput_raw.total`
Unit: b_per_sec
Type: rate
Base: | conf/keyperf/9.15.0/lun.yaml |
+
+
+### lun_total_latency
+
+Performance metric aggregated over all types of I/O operations.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/luns` | `statistics.latency_raw.total`
Unit: microsec
Type: average
Base: lun_statistics.iops_raw.total | conf/keyperf/9.15.0/lun.yaml |
+
+
+### lun_total_ops
+
+Performance metric aggregated over all types of I/O operations.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/luns` | `statistics.iops_raw.total`
Unit: per_sec
Type: rate
Base: | conf/keyperf/9.15.0/lun.yaml |
+
+
### lun_unmap_reqs
Number of unmap command requests
@@ -3863,6 +3921,7 @@ Write bytes
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/lun` | `write_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/lun.yaml |
+| KeyPerf | `api/storage/luns` | `statistics.throughput_raw.write`
Unit: b_per_sec
Type: rate
Base: | conf/keyperf/9.15.0/lun.yaml |
| ZAPI | `perf-object-get-instances lun` | `write_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/lun.yaml |
@@ -3873,6 +3932,7 @@ Number of write operations
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/lun` | `write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/lun.yaml |
+| KeyPerf | `api/storage/luns` | `statistics.iops_raw.write`
Unit: per_sec
Type: rate
Base: | conf/keyperf/9.15.0/lun.yaml |
| ZAPI | `perf-object-get-instances lun` | `write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.8.0/lun.yaml |
@@ -4159,6 +4219,7 @@ Average other ops latency in microseconds for all operations on the Namespace
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/namespace` | `average_other_latency`
Unit: microsec
Type: average
Base: other_ops | conf/restperf/9.12.0/namespace.yaml |
+| KeyPerf | `api/storage/namespaces` | `statistics.latency_raw.other`
Unit: microsec
Type: average
Base: namespace_statistics.iops_raw.other | conf/keyperf/9.15.0/namespace.yaml |
| ZAPI | `perf-object-get-instances namespace` | `avg_other_latency`
Unit: microsec
Type: average
Base: other_ops | conf/zapiperf/cdot/9.10.1/namespace.yaml |
@@ -4169,9 +4230,19 @@ Average read latency in microseconds for all operations on the Namespace
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/namespace` | `average_read_latency`
Unit: microsec
Type: average
Base: read_ops | conf/restperf/9.12.0/namespace.yaml |
+| KeyPerf | `api/storage/namespaces` | `statistics.latency_raw.read`
Unit: microsec
Type: average
Base: namespace_statistics.iops_raw.read | conf/keyperf/9.15.0/namespace.yaml |
| ZAPI | `perf-object-get-instances namespace` | `avg_read_latency`
Unit: microsec
Type: average
Base: read_ops | conf/zapiperf/cdot/9.10.1/namespace.yaml |
+### namespace_avg_total_latency
+
+Performance metric aggregated over all types of I/O operations.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/namespaces` | `statistics.latency_raw.total`
Unit: microsec
Type: average
Base: namespace_statistics.iops_raw.total | conf/keyperf/9.15.0/namespace.yaml |
+
+
### namespace_avg_write_latency
Average write latency in microseconds for all operations on the Namespace
@@ -4179,6 +4250,7 @@ Average write latency in microseconds for all operations on the Namespace
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/namespace` | `average_write_latency`
Unit: microsec
Type: average
Base: write_ops | conf/restperf/9.12.0/namespace.yaml |
+| KeyPerf | `api/storage/namespaces` | `statistics.latency_raw.write`
Unit: microsec
Type: average
Base: namespace_statistics.iops_raw.write | conf/keyperf/9.15.0/namespace.yaml |
| ZAPI | `perf-object-get-instances namespace` | `avg_write_latency`
Unit: microsec
Type: average
Base: write_ops | conf/zapiperf/cdot/9.10.1/namespace.yaml |
@@ -4199,6 +4271,7 @@ Number of other operations
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/namespace` | `other_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml |
+| KeyPerf | `api/storage/namespaces` | `statistics.iops_raw.other`
Unit: per_sec
Type: rate
Base: | conf/keyperf/9.15.0/namespace.yaml |
| ZAPI | `perf-object-get-instances namespace` | `other_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml |
@@ -4209,6 +4282,7 @@ Read bytes
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/namespace` | `read_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml |
+| KeyPerf | `api/storage/namespaces` | `statistics.throughput_raw.read`
Unit: b_per_sec
Type: rate
Base: | conf/keyperf/9.15.0/namespace.yaml |
| ZAPI | `perf-object-get-instances namespace` | `read_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml |
@@ -4219,6 +4293,7 @@ Number of read operations
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/namespace` | `read_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml |
+| KeyPerf | `api/storage/namespaces` | `statistics.iops_raw.read`
Unit: per_sec
Type: rate
Base: | conf/keyperf/9.15.0/namespace.yaml |
| ZAPI | `perf-object-get-instances namespace` | `read_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml |
@@ -4282,6 +4357,24 @@ The amount of space consumed by the main data stream of the NVMe namespace.
| ZAPI | `nvme-namespace-get-iter` | `nvme-namespace-info.size-used` | conf/zapi/cdot/9.8.0/namespace.yaml |
+### namespace_total_data
+
+Performance metric aggregated over all types of I/O operations.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/namespaces` | `statistics.throughput_raw.total`
Unit: b_per_sec
Type: rate
Base: | conf/keyperf/9.15.0/namespace.yaml |
+
+
+### namespace_total_ops
+
+Performance metric aggregated over all types of I/O operations.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| KeyPerf | `api/storage/namespaces` | `statistics.iops_raw.total`
Unit: per_sec
Type: rate
Base: | conf/keyperf/9.15.0/namespace.yaml |
+
+
### namespace_write_data
Write bytes
@@ -4289,6 +4382,7 @@ Write bytes
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/namespace` | `write_data`
Unit: b_per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml |
+| KeyPerf | `api/storage/namespaces` | `statistics.throughput_raw.write`
Unit: b_per_sec
Type: rate
Base: | conf/keyperf/9.15.0/namespace.yaml |
| ZAPI | `perf-object-get-instances namespace` | `write_data`
Unit: b_per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml |
@@ -4299,6 +4393,7 @@ Number of write operations
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/namespace` | `write_ops`
Unit: per_sec
Type: rate
Base: | conf/restperf/9.12.0/namespace.yaml |
+| KeyPerf | `api/storage/namespaces` | `statistics.iops_raw.write`
Unit: per_sec
Type: rate
Base: | conf/keyperf/9.15.0/namespace.yaml |
| ZAPI | `perf-object-get-instances namespace` | `write_ops`
Unit: per_sec
Type: rate
Base: | conf/zapiperf/cdot/9.10.1/namespace.yaml |
@@ -10427,7 +10522,7 @@ Summation of NFS ops, CIFS ops, CSS ops and internal ops
### qtree_write_data
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -10436,7 +10531,7 @@ Performance metric for write I/O operations.
### qtree_write_ops
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -10739,7 +10834,7 @@ Array of number of give-ups of CIFS ops because they rewind more than a certain
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/rewind_context` | `cifs_give_ups`
Unit: none
Type: delta
Base: | conf/restperf/9.16.0/rwctx.yaml |
-| ZAPI | `perf-object-get-instances rw_ctx` | `cifs_giveups`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/rwctx.yaml |
+| ZAPI | `perf-object-get-instances rw_ctx` | `cifs_giveups`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/rwctx.yaml |
### rw_ctx_cifs_rewinds
@@ -10749,7 +10844,7 @@ Array of number of rewinds for CIFS ops based on their reasons.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/rewind_context` | `cifs_rewinds`
Unit: none
Type: delta
Base: | conf/restperf/9.16.0/rwctx.yaml |
-| ZAPI | `perf-object-get-instances rw_ctx` | `cifs_rewinds`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/rwctx.yaml |
+| ZAPI | `perf-object-get-instances rw_ctx` | `cifs_rewinds`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/rwctx.yaml |
### rw_ctx_nfs_giveups
@@ -10759,7 +10854,7 @@ Array of number of give-ups of NFS ops because they rewind more than a certain t
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/rewind_context` | `nfs_give_ups`
Unit: none
Type: delta
Base: | conf/restperf/9.16.0/rwctx.yaml |
-| ZAPI | `perf-object-get-instances rw_ctx` | `nfs_giveups`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/rwctx.yaml |
+| ZAPI | `perf-object-get-instances rw_ctx` | `nfs_giveups`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/rwctx.yaml |
### rw_ctx_nfs_rewinds
@@ -10769,7 +10864,7 @@ Array of number of rewinds for NFS ops based on their reasons.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
| REST | `api/cluster/counter/tables/rewind_context` | `nfs_rewinds`
Unit: none
Type: delta
Base: | conf/restperf/9.16.0/rwctx.yaml |
-| ZAPI | `perf-object-get-instances rw_ctx` | `nfs_rewinds`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/rwctx.yaml |
+| ZAPI | `perf-object-get-instances rw_ctx` | `nfs_rewinds`
Unit: none
Type: delta
Base: | conf/zapiperf/cdot/9.8.0/rwctx.yaml |
### rw_ctx_qos_flowcontrol
@@ -11414,7 +11509,7 @@ Total Number of Schedules in this Policy
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
-| REST | `api/private/cli/snapshot/policy` | `total_schedules` | conf/rest/9.12.0/snapshotpolicy.yaml |
+| REST | `api/private/cli/volume/snapshot/policy` | `total_schedules` | conf/rest/9.12.0/snapshotpolicy.yaml |
| ZAPI | `snapshot-policy-get-iter` | `snapshot-policy-info.total-schedules` | conf/zapi/cdot/9.8.0/snapshotpolicy.yaml |
@@ -11557,7 +11652,7 @@ Performance metric aggregated over all types of I/O operations.
### svm_cifs_write_data
-Performance metric for write I/O operations.
+Performance metric for write I/O operations.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -14928,7 +15023,7 @@ Performance metric aggregated over all types of I/O operations. svm_volume_total
### svm_volume_write_data
-Performance metric for write I/O operations. svm_volume_write_data is [volume_write_data](#volume_write_data) aggregated by `svm`.
+Performance metric for write I/O operations. svm_volume_write_data is [volume_write_data](#volume_write_data) aggregated by `svm`.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -14937,7 +15032,7 @@ Performance metric for write I/O operations. svm_volume_write_data is [volume_wr
### svm_volume_write_latency
-Performance metric for write I/O operations. svm_volume_write_latency is [volume_write_latency](#volume_write_latency) aggregated by `svm`.
+Performance metric for write I/O operations. svm_volume_write_latency is [volume_write_latency](#volume_write_latency) aggregated by `svm`.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
@@ -14946,7 +15041,7 @@ Performance metric for write I/O operations. svm_volume_write_latency is [volume
### svm_volume_write_ops
-Performance metric for write I/O operations. svm_volume_write_ops is [volume_write_ops](#volume_write_ops) aggregated by `svm`.
+Performance metric for write I/O operations. svm_volume_write_ops is [volume_write_ops](#volume_write_ops) aggregated by `svm`.
| API | Endpoint | Metric | Template |
|--------|----------|--------|---------|
diff --git a/docs/prepare-cdot-clusters.md b/docs/prepare-cdot-clusters.md
index df84efb83..cc74ff16e 100644
--- a/docs/prepare-cdot-clusters.md
+++ b/docs/prepare-cdot-clusters.md
@@ -235,6 +235,7 @@ security login rest-role create -role harvest2-rest-role -access readonly -api /
security login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/qos/workload
security login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/qtree
security login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/snapmirror
+ security login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/volume/snapshot/policy
security login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/storage/failover
security login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/storage/shelf
security login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/system/chassis/fru
diff --git a/grafana/dashboards/cmode/cdot.json b/grafana/dashboards/cmode/cdot.json
index 2028603bc..be7a9711e 100644
--- a/grafana/dashboards/cmode/cdot.json
+++ b/grafana/dashboards/cmode/cdot.json
@@ -2054,7 +2054,7 @@
"error": null,
"hide": 0,
"includeAll": false,
- "label": "",
+ "label": null,
"multi": true,
"name": "Datacenter",
"options": [],
@@ -2080,7 +2080,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Cluster",
"options": [],
@@ -2106,7 +2106,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "SVM",
"options": [],
@@ -2132,7 +2132,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Volume",
"options": [],
@@ -2251,7 +2251,7 @@
"error": null,
"hide": 0,
"includeAll": false,
- "label": "",
+ "label": null,
"multi": false,
"name": "IncludeRoot",
"options": [
diff --git a/grafana/dashboards/cmode/changelogMonitor.json b/grafana/dashboards/cmode/changelogMonitor.json
index 1057fb049..929540e99 100644
--- a/grafana/dashboards/cmode/changelogMonitor.json
+++ b/grafana/dashboards/cmode/changelogMonitor.json
@@ -1473,7 +1473,7 @@
"error": null,
"hide": 0,
"includeAll": false,
- "label": "",
+ "label": null,
"multi": true,
"name": "Datacenter",
"options": [],
@@ -1499,7 +1499,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Cluster",
"options": [],
diff --git a/grafana/dashboards/cmode/cluster.json b/grafana/dashboards/cmode/cluster.json
index 6f1ffb3df..7f7d0685b 100644
--- a/grafana/dashboards/cmode/cluster.json
+++ b/grafana/dashboards/cmode/cluster.json
@@ -5107,7 +5107,7 @@
"error": null,
"hide": 0,
"includeAll": false,
- "label": "",
+ "label": null,
"multi": true,
"name": "Datacenter",
"options": [],
@@ -5131,7 +5131,7 @@
"definition": "label_values(cluster_tags{system_type!=\"7mode\", datacenter=~\"$Datacenter\"},tag)",
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Tag",
"options": [],
@@ -5157,7 +5157,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Cluster",
"options": [],
diff --git a/grafana/dashboards/cmode/datacenter.json b/grafana/dashboards/cmode/datacenter.json
index 57aca3ccb..2c4d60d57 100644
--- a/grafana/dashboards/cmode/datacenter.json
+++ b/grafana/dashboards/cmode/datacenter.json
@@ -3954,7 +3954,7 @@
"error": null,
"hide": 0,
"includeAll": false,
- "label": "",
+ "label": null,
"multi": true,
"name": "Datacenter",
"options": [],
@@ -3980,7 +3980,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Cluster",
"options": [],
diff --git a/grafana/dashboards/cmode/disk.json b/grafana/dashboards/cmode/disk.json
index b1673ade9..94e813261 100644
--- a/grafana/dashboards/cmode/disk.json
+++ b/grafana/dashboards/cmode/disk.json
@@ -65,7 +65,7 @@
"gnetId": null,
"graphTooltip": 1,
"id": null,
- "iteration": 1715672476697,
+ "iteration": 1733915992490,
"links": [
{
"asDropdown": true,
@@ -2410,7 +2410,7 @@
"h": 9,
"w": 8,
"x": 0,
- "y": 48
+ "y": 32
},
"id": 32,
"options": {
@@ -2446,7 +2446,7 @@
],
"timeFrom": null,
"timeShift": null,
- "title": "Disk Throughput",
+ "title": "Disk Throughput by Node",
"transformations": [],
"type": "timeseries"
},
@@ -2508,7 +2508,7 @@
"h": 9,
"w": 8,
"x": 8,
- "y": 48
+ "y": 32
},
"id": 34,
"options": {
@@ -2544,13 +2544,13 @@
],
"timeFrom": null,
"timeShift": null,
- "title": "Disk and Tape Drives Throughput",
+ "title": "Disk and Tape Drives Throughput by Node",
"transformations": [],
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
- "description": "This panel displays detail of array of counts of different types of Consistency Points (CP) and average latency in microseconds for the WAFL filesystem to process write request to the volume.",
+ "description": "This panel displays detail of bytes written/read through a host adapter.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2598,58 +2598,17 @@
}
]
},
- "unit": "µs"
+ "unit": "binBps"
},
- "overrides": [
- {
- "matcher": {
- "id": "byFrameRefID",
- "options": "A"
- },
- "properties": [
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "custom.axisLabel",
- "value": "Back to Back CP Count"
- },
- {
- "id": "unit",
- "value": "locale"
- }
- ]
- },
- {
- "matcher": {
- "id": "byFrameRefID",
- "options": "B"
- },
- "properties": [
- {
- "id": "custom.axisPlacement",
- "value": "left"
- },
- {
- "id": "custom.axisLabel",
- "value": "Write Latency"
- },
- {
- "id": "unit",
- "value": "µs"
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
"h": 9,
"w": 8,
"x": 16,
- "y": 48
+ "y": 32
},
- "id": 40,
+ "id": 62,
"options": {
"legend": {
"calcs": [
@@ -2667,23 +2626,25 @@
"pluginVersion": "8.1.8",
"targets": [
{
- "expr": "sum(wafl_cp_count{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",node=~\"$Node\",metric=~\"back_to_back_CP|deferred_back_to_back_CP\"})",
+ "exemplar": false,
+ "expr": "(\n sum by (datacenter,cluster,node,hostadapter) (hostadapter_bytes_written{datacenter=~\"$Datacenter\", cluster=~\"$Cluster\",node=~\"$Node\",hostadapter=~\"$HostAdapter\"})\n)\nand on(datacenter,cluster,node,hostadapter)\ntopk(\n $TopResources,\n sum by (datacenter,cluster,node,hostadapter) (\n avg_over_time(hostadapter_bytes_written{datacenter=~\"$Datacenter\", cluster=~\"$Cluster\",node=~\"$Node\",hostadapter=~\"$HostAdapter\"}[3h])\n )\n)",
"hide": false,
"interval": "",
- "legendFormat": "Back-to-back CP Count",
+ "legendFormat": "{{node}} - {{hostadapter}} - WRITE",
"refId": "A"
},
{
- "expr": "node_vol_write_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",node=~\"$Node\"}",
+ "exemplar": false,
+ "expr": "(\n sum by (datacenter,cluster,node,hostadapter) (hostadapter_bytes_read{datacenter=~\"$Datacenter\", cluster=~\"$Cluster\",node=~\"$Node\",hostadapter=~\"$HostAdapter\"})\n)\nand on(datacenter,cluster,node,hostadapter)\ntopk(\n $TopResources,\n sum by (datacenter,cluster,node,hostadapter) (\n avg_over_time(hostadapter_bytes_read{datacenter=~\"$Datacenter\", cluster=~\"$Cluster\",node=~\"$Node\",hostadapter=~\"$HostAdapter\"}[3h])\n )\n)",
"hide": false,
"interval": "",
- "legendFormat": "Write Latency {{node}}",
+ "legendFormat": "{{node}} - {{hostadapter}} - READ",
"refId": "B"
}
],
"timeFrom": null,
"timeShift": null,
- "title": "CP (Consistency Points) Counts",
+ "title": "Top $TopResources Disk and Tape Drives Throughput by Host Adapter",
"transformations": [],
"type": "timeseries"
},
@@ -2777,7 +2738,7 @@
"h": 9,
"w": 8,
"x": 0,
- "y": 57
+ "y": 41
},
"id": 36,
"options": {
@@ -2907,7 +2868,7 @@
"h": 9,
"w": 8,
"x": 8,
- "y": 57
+ "y": 41
},
"id": 38,
"options": {
@@ -2946,9 +2907,149 @@
"title": "Flash Pool",
"transformations": [],
"type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "This panel displays detail of array of counts of different types of Consistency Points (CP) and average latency in microseconds for the WAFL filesystem to process write request to the volume.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "µs"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byFrameRefID",
+ "options": "A"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "custom.axisLabel",
+ "value": "Back to Back CP Count"
+ },
+ {
+ "id": "unit",
+ "value": "locale"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byFrameRefID",
+ "options": "B"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "left"
+ },
+ {
+ "id": "custom.axisLabel",
+ "value": "Write Latency"
+ },
+ {
+ "id": "unit",
+ "value": "µs"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 8,
+ "x": 16,
+ "y": 41
+ },
+ "id": 40,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom"
+ },
+ "tooltip": {
+ "mode": "single"
+ }
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "sum(wafl_cp_count{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",node=~\"$Node\",metric=~\"back_to_back_CP|deferred_back_to_back_CP\"})",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "Back-to-back CP Count",
+ "refId": "A"
+ },
+ {
+ "expr": "node_vol_write_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",node=~\"$Node\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "Write Latency {{node}}",
+ "refId": "B"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CP (Consistency Points) Counts",
+ "transformations": [],
+ "type": "timeseries"
}
],
- "title": "Disk Utilization: Node-level Overview",
+ "title": "Disk Utilization",
"type": "row"
}
],
@@ -3049,7 +3150,7 @@
"options": [],
"query": {
"query": "label_values(disk_labels{system_type!=\"7mode\",cluster=~\"$Cluster\"}, node)",
- "refId": "Prometheus-Node-Variable-Query"
+ "refId": "StandardVariableQuery"
},
"refresh": 2,
"regex": "",
@@ -3075,7 +3176,7 @@
"options": [],
"query": {
"query": "label_values(aggr_disk_busy{cluster=~\"$Cluster\",node=~\"$Node\"}, aggr)",
- "refId": "Prometheus-Aggregate-Variable-Query"
+ "refId": "StandardVariableQuery"
},
"refresh": 2,
"regex": "",
@@ -3138,6 +3239,29 @@
"type": "query",
"useTags": false
},
+ {
+ "allValue": null,
+ "current": {},
+ "datasource": "${DS_PROMETHEUS}",
+ "definition": "label_values(hostadapter_bytes_read{system_type!=\"7mode\",cluster=~\"$Cluster\",node=~\"$Node\"}, hostadapter)",
+ "description": null,
+ "error": null,
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": true,
+ "name": "HostAdapter",
+ "options": [],
+ "query": {
+ "query": "label_values(hostadapter_bytes_read{system_type!=\"7mode\",cluster=~\"$Cluster\",node=~\"$Node\"}, hostadapter)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "type": "query"
+ },
{
"allValue": null,
"current": {
@@ -3251,5 +3375,5 @@
"timezone": "",
"title": "ONTAP: Disk",
"uid": "cdot-disk",
- "version": 32
+ "version": 15
}
diff --git a/grafana/dashboards/cmode/health.json b/grafana/dashboards/cmode/health.json
index e4fbb722f..36bcedff2 100644
--- a/grafana/dashboards/cmode/health.json
+++ b/grafana/dashboards/cmode/health.json
@@ -4436,7 +4436,7 @@
"error": null,
"hide": 0,
"includeAll": false,
- "label": "",
+ "label": null,
"multi": true,
"name": "Datacenter",
"options": [],
@@ -4462,7 +4462,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Cluster",
"options": [],
diff --git a/grafana/dashboards/cmode/mcc_cluster.json b/grafana/dashboards/cmode/mcc_cluster.json
index 35eac2043..9bc844361 100644
--- a/grafana/dashboards/cmode/mcc_cluster.json
+++ b/grafana/dashboards/cmode/mcc_cluster.json
@@ -4241,7 +4241,7 @@
"error": null,
"hide": 0,
"includeAll": false,
- "label": "",
+ "label": null,
"multi": false,
"name": "TopResources",
"options": [
diff --git a/grafana/dashboards/cmode/metadata.json b/grafana/dashboards/cmode/metadata.json
index 61eb388fe..10740ba0b 100644
--- a/grafana/dashboards/cmode/metadata.json
+++ b/grafana/dashboards/cmode/metadata.json
@@ -3204,7 +3204,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Hostname",
"options": [],
diff --git a/grafana/dashboards/cmode/network.json b/grafana/dashboards/cmode/network.json
index 3df2b900e..5f2a21858 100644
--- a/grafana/dashboards/cmode/network.json
+++ b/grafana/dashboards/cmode/network.json
@@ -4924,7 +4924,7 @@
"error": null,
"hide": 0,
"includeAll": false,
- "label": "",
+ "label": null,
"multi": true,
"name": "Datacenter",
"options": [],
@@ -4950,7 +4950,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Cluster",
"options": [],
@@ -4976,7 +4976,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Node",
"options": [],
@@ -5002,7 +5002,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "Eth",
"options": [],
@@ -5028,7 +5028,7 @@
"error": null,
"hide": 0,
"includeAll": true,
- "label": "",
+ "label": null,
"multi": true,
"name": "FCP",
"options": [],
diff --git a/integration/Jenkinsfile b/integration/Jenkinsfile
index caf6e8d25..375d643b6 100644
--- a/integration/Jenkinsfile
+++ b/integration/Jenkinsfile
@@ -301,6 +301,7 @@ def void setupWorkspace() {
git clone --single-branch --branch $BRANCH https://github.com/NetApp/harvest.git
cp /u/mpeg/harvest/harvest_cert.yml $WORKSPACE/harvest/integration/test/
cp /u/mpeg/harvest/harvest.yml $WORKSPACE/harvest/integration/test/
+ cp /u/mpeg/harvest/harvest_admin.yml $WORKSPACE/harvest/integration/test/
ls -ltr $WORKSPACE/harvest/integration/test/
'''
}
diff --git a/integration/go.mod b/integration/go.mod
index 846c08c1a..ea81aed0c 100644
--- a/integration/go.mod
+++ b/integration/go.mod
@@ -7,9 +7,9 @@ toolchain go1.23.0
replace github.com/netapp/harvest/v2 => ../
require (
- github.com/carlmjohnson/requests v0.24.2
- github.com/netapp/harvest/v2 v2.0.0-20241001142758-f137ab714dd8
- golang.org/x/text v0.20.0
+ github.com/carlmjohnson/requests v0.24.3
+ github.com/netapp/harvest/v2 v2.0.0-20241217100501-6f5c2ad8ca3a
+ golang.org/x/text v0.21.0
)
require (
@@ -31,11 +31,11 @@ require (
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
- github.com/tklauser/numcpus v0.8.0 // indirect
+ github.com/tklauser/numcpus v0.9.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- golang.org/x/net v0.29.0 // indirect
- golang.org/x/sys v0.27.0 // indirect
- golang.org/x/term v0.26.0 // indirect
+ golang.org/x/net v0.32.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/term v0.27.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/integration/go.sum b/integration/go.sum
index 08404c137..e654d8144 100644
--- a/integration/go.sum
+++ b/integration/go.sum
@@ -1,5 +1,5 @@
-github.com/carlmjohnson/requests v0.24.2 h1:JDakhAmTIKL/qL/1P7Kkc2INGBJIkIFP6xUeUmPzLso=
-github.com/carlmjohnson/requests v0.24.2/go.mod h1:duYA/jDnyZ6f3xbcF5PpZ9N8clgopubP2nK5i6MVMhU=
+github.com/carlmjohnson/requests v0.24.3 h1:LYcM/jVIVPkioigMjEAnBACXl2vb42TVqiC8EYNoaXQ=
+github.com/carlmjohnson/requests v0.24.3/go.mod h1:duYA/jDnyZ6f3xbcF5PpZ9N8clgopubP2nK5i6MVMhU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -53,21 +53,21 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
-github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
-github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
+github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
+github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
-golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
+golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
-golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
-golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
-golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
-golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/integration/test/copy_logs_test.go b/integration/test/copy_logs_test.go
index 0fd161953..d151ea936 100644
--- a/integration/test/copy_logs_test.go
+++ b/integration/test/copy_logs_test.go
@@ -93,5 +93,5 @@ func checkLogs(t *testing.T, container docker.Container, info containerInfo) {
// pollerIgnore returns a list of regex patterns that will be ignored
func pollerIgnore() string {
- return `RPC: Remote system error|connection error|Code: 2426405|failed to fetch data: error making request StatusCode: 403, Error: Permission denied, Message: not authorized for that command, API: (/api/private/cli/snapshot/policy|/api/support/autosupport)`
+ return `RPC: Remote system error|connection error|Code: 2426405|failed to fetch data: error making request StatusCode: 403, Error: Permission denied, Message: not authorized for that command, API: (/api/support/autosupport)`
}
diff --git a/integration/test/counter_test.go b/integration/test/counter_test.go
index d9ba8dec6..43ca2583a 100644
--- a/integration/test/counter_test.go
+++ b/integration/test/counter_test.go
@@ -36,7 +36,6 @@ var skipTemplates = map[string]bool{
}
var skipEndpoints = []string{
- "api/private/cli/snapshot/policy",
"api/support/autosupport",
"api/private/cli/export-policy/rule",
}
@@ -47,14 +46,12 @@ func TestCounters(t *testing.T) {
var (
poller *conf.Poller
client *rest2.Client
+ err error
)
utils.SkipIfMissing(t, utils.Regression)
- _, err := conf.LoadHarvestConfig(installer.HarvestConfigFile)
- if err != nil {
- slog.Error("Unable to load harvest config", slogx.Err(err))
- os.Exit(1)
- }
+ validateRolePermissions()
+ conf.TestLoadHarvestConfig(installer.HarvestConfigFile)
pollerName := "dc1"
if poller, err = conf.PollerNamed(pollerName); err != nil {
@@ -89,6 +86,58 @@ func TestCounters(t *testing.T) {
}
+func validateRolePermissions() {
+ var (
+ adminPoller *conf.Poller
+ adminClient *rest2.Client
+ err error
+ )
+
+ // Load the admin poller from harvest_admin.yml
+ conf.TestLoadHarvestConfig(installer.HarvestAdminConfigFile)
+
+ pollerName := "dc1-admin"
+ if adminPoller, err = conf.PollerNamed(pollerName); err != nil {
+ slog.Error("unable to find poller", slogx.Err(err), slog.String("poller", pollerName))
+ os.Exit(1)
+ }
+ if adminPoller.Addr == "" {
+ slog.Error("admin poller address is empty", slog.String("poller", pollerName))
+ os.Exit(1)
+ }
+
+ timeout, _ := time.ParseDuration(rest2.DefaultTimeout)
+ if adminClient, err = rest2.New(adminPoller, timeout, auth.NewCredentials(adminPoller, slog.Default())); err != nil {
+ slog.Error("error creating new admin client", slogx.Err(err), slog.String("poller", pollerName))
+ os.Exit(1)
+ }
+
+ if err = adminClient.Init(5, conf.Remote{}); err != nil {
+ slog.Error("admin client init failed", slogx.Err(err), slog.String("poller", pollerName))
+ os.Exit(1)
+ }
+
+ apiEndpoint := "api/private/cli/security/login/rest-role"
+ href := rest2.NewHrefBuilder().
+ APIPath(apiEndpoint).
+ Filter([]string{"role=harvest-rest-role", "api=/api/private/cli"}).
+ Build()
+
+ response, err := collectors.InvokeRestCall(adminClient, href)
+ if err != nil {
+ slog.Error("failed to invoke admin rest call", slogx.Err(err), slog.String("endpoint", apiEndpoint))
+ os.Exit(1)
+ }
+
+ for _, instanceData := range response {
+ api := instanceData.Get("api")
+ if api.Exists() {
+ slog.Error("unexpected 'api' field found in the response data; permissions for /api/private/cli should not be present")
+ os.Exit(1)
+ }
+ }
+}
+
func invokeRestCall(client *rest2.Client, counters map[string][]counterData) error {
for _, countersDetail := range counters {
for _, counterDetail := range countersDetail {
diff --git a/integration/test/installer/installerFactory.go b/integration/test/installer/installerFactory.go
index 93de7bf86..af394cd21 100644
--- a/integration/test/installer/installerFactory.go
+++ b/integration/test/installer/installerFactory.go
@@ -5,11 +5,12 @@ import (
)
const (
- RHEL = "rpm"
- NATIVE = "tar"
- HarvestConfigFile = "harvest.yml"
- GRAFANA = "grafana"
- PROMETHEUS = "prometheus"
+ RHEL = "rpm"
+ NATIVE = "tar"
+ HarvestConfigFile = "harvest.yml"
+ HarvestAdminConfigFile = "harvest_admin.yml"
+ GRAFANA = "grafana"
+ PROMETHEUS = "prometheus"
)
func GetInstaller(installType string, path string) (Installer, error) {