diff --git a/cmd/collectors/restperf/plugins/volumetopclients/testdata/readdata.json b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/client_readdata.json similarity index 100% rename from cmd/collectors/restperf/plugins/volumetopclients/testdata/readdata.json rename to cmd/collectors/restperf/plugins/volumetopmetrics/testdata/client_readdata.json diff --git a/cmd/collectors/restperf/plugins/volumetopclients/testdata/readops.json b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/client_readops.json similarity index 100% rename from cmd/collectors/restperf/plugins/volumetopclients/testdata/readops.json rename to cmd/collectors/restperf/plugins/volumetopmetrics/testdata/client_readops.json diff --git a/cmd/collectors/restperf/plugins/volumetopclients/testdata/writedata.json b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/client_writedata.json similarity index 100% rename from cmd/collectors/restperf/plugins/volumetopclients/testdata/writedata.json rename to cmd/collectors/restperf/plugins/volumetopmetrics/testdata/client_writedata.json diff --git a/cmd/collectors/restperf/plugins/volumetopclients/testdata/writeops.json b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/client_writeops.json similarity index 100% rename from cmd/collectors/restperf/plugins/volumetopclients/testdata/writeops.json rename to cmd/collectors/restperf/plugins/volumetopmetrics/testdata/client_writeops.json diff --git a/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_readdata.json b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_readdata.json new file mode 100644 index 000000000..f41907e57 --- /dev/null +++ b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_readdata.json @@ -0,0 +1,43 @@ +{ + "records": [ + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "throughput": { + "read": 65470464, + "error": { + "lower_bound": 65470464, + "upper_bound": 65470464 + } + }, + "path": "/harvest-rhel8-1/harvest-rhel8-1_6-flat.vmdk", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": "/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/harvest-rhel8-1%2Fharvest-rhel8-1_6-flat%2Evmdk?return_metadata=true" + } + } + } + ], + "num_records": 1, + "_links": { + "self": { + "href": "/api/storage/volumes/*/top-metrics/files?top_metric=throughput.read&volume.name=osc_vol01" + } + } +} \ No newline at end of file diff --git a/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_readops.json b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_readops.json new file mode 100644 index 000000000..5c729158b --- /dev/null +++ b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_readops.json @@ -0,0 +1,43 @@ +{ + "records": [ + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "iops": { + "read": 350, + "error": { + "lower_bound": 350, + "upper_bound": 350 + } + }, + "path": "/harvest-rhel8-1/harvest-rhel8-1_9-flat.vmdk", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": 
"/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/harvest-rhel8-1%2Fharvest-rhel8-1_9-flat%2Evmdk?return_metadata=true" + } + } + } + ], + "num_records": 1, + "_links": { + "self": { + "href": "/api/storage/volumes/*/top-metrics/files?top_metric=iops.read&volume.name=osc_vol01" + } + } +} \ No newline at end of file diff --git a/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_writedata.json b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_writedata.json new file mode 100644 index 000000000..2f9ee6138 --- /dev/null +++ b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_writedata.json @@ -0,0 +1,43 @@ +{ + "records": [ + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "throughput": { + "write": 819, + "error": { + "lower_bound": 819, + "upper_bound": 819 + } + }, + "path": "/.vSphere-HA/FDM-cf816f20-89e8-4f6a-92cb-3aee5ededaf9-1012-b20b14c-harvestvc/.lck-c5009be600000000", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": "/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/%2EvSphere-HA%2FFDM-cf816f20-89e8-4f6a-92cb-3aee5ededaf9-1012-b20b14c-harvestvc%2F%2Elck-c5009be600000000?return_metadata=true" + } + } + } + ], + "num_records": 1, + "_links": { + "self": { + "href": "/api/storage/volumes/*/top-metrics/files?top_metric=throughput.write&volume.name=osc_vol01" + } + } +} \ No newline at end of file diff --git a/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_writeops.json b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_writeops.json new file mode 100644 index 000000000..c13dfcea2 --- /dev/null +++ b/cmd/collectors/restperf/plugins/volumetopmetrics/testdata/file_writeops.json @@ -0,0 +1,208 @@ +{ + "records": [ + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "iops": { + "write": 1, + "error": { + "lower_bound": 1, + "upper_bound": 1 + } + }, + "path": "/.vSphere-HA/FDM-cf816f20-89e8-4f6a-92cb-3aee5ededaf9-1012-b20b14c-harvestvc/host-1291-hb", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": "/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/%2EvSphere-HA%2FFDM-cf816f20-89e8-4f6a-92cb-3aee5ededaf9-1012-b20b14c-harvestvc%2Fhost-1291-hb?return_metadata=true" + } + } + }, + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "iops": { + "write": 1, + "error": { + "lower_bound": 1, + "upper_bound": 1 + } + }, + "path": "/ci-io-load-ubuntu/.lck-7c009be600000000", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": "/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": 
"/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/ci-io-load-ubuntu%2F%2Elck-7c009be600000000?return_metadata=true" + } + } + }, + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "iops": { + "write": 1, + "error": { + "lower_bound": 1, + "upper_bound": 1 + } + }, + "path": "/ci-io-load-ubuntu/.lck-b1009be600000000", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": "/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/ci-io-load-ubuntu%2F%2Elck-b1009be600000000?return_metadata=true" + } + } + }, + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "iops": { + "write": 1, + "error": { + "lower_bound": 1, + "upper_bound": 1 + } + }, + "path": "/ci-io-load-ubuntu/.lck-be009be600000000", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": "/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/ci-io-load-ubuntu%2F%2Elck-be009be600000000?return_metadata=true" + } + } + }, + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "iops": { + "write": 1, + "error": { + "lower_bound": 1, + "upper_bound": 1 + } + }, + "path": "/ci-io-load-ubuntu/.lck-c2719be600000000", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": "/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/ci-io-load-ubuntu%2F%2Elck-c2719be600000000?return_metadata=true" + } + } + }, + { + "volume": { + "uuid": "03613247-54ac-4127-a512-afb331f1c207", + "name": "osc_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207" + } + } + }, + "iops": { + "write": 1, + "error": { + "lower_bound": 1, + "upper_bound": 1 + } + }, + "path": "/ci-io-load-ubuntu/.lck-c3719be600000000", + "svm": { + "uuid": "ff82ac73-d7f8-11eb-80e3-00a098d39e12", + "name": "osc", + "_links": { + "self": { + "href": "/api/svm/svms/ff82ac73-d7f8-11eb-80e3-00a098d39e12" + } + } + }, + "_links": { + "metadata": { + "href": "/api/storage/volumes/03613247-54ac-4127-a512-afb331f1c207/files/ci-io-load-ubuntu%2F%2Elck-c3719be600000000?return_metadata=true" + } + } + } + ], + "num_records": 6, + "_links": { + "self": { + "href": "/api/storage/volumes/*/top-metrics/files?top_metric=iops.write&volume.name=osc_vol01" + } + } +} \ No newline at end of file diff --git a/cmd/collectors/restperf/plugins/volumetopclients/volumetopclients.go b/cmd/collectors/restperf/plugins/volumetopmetrics/volumetopmetrics.go similarity index 60% rename from cmd/collectors/restperf/plugins/volumetopclients/volumetopclients.go rename to cmd/collectors/restperf/plugins/volumetopmetrics/volumetopmetrics.go index a8c77e508..ea71af2e0 100644 --- 
a/cmd/collectors/restperf/plugins/volumetopclients/volumetopclients.go +++ b/cmd/collectors/restperf/plugins/volumetopmetrics/volumetopmetrics.go @@ -1,4 +1,4 @@ -package volumetopclients +package volumetopmetrics import ( "cmp" @@ -20,8 +20,10 @@ import ( type VolumeTracker interface { fetchTopClients(volumes *set.Set, svms *set.Set, metric string) ([]gjson.Result, error) + fetchTopFiles(volumes *set.Set, svms *set.Set, metric string) ([]gjson.Result, error) fetchVolumesWithActivityTrackingEnabled() (*set.Set, error) - processTopClients(data *matrix.Matrix) error + processTopClients(data *TopMetricsData) error + processTopFiles(data *TopMetricsData) error } const ( @@ -30,6 +32,10 @@ const ( topClientWriteOPSMatrix = "volume_top_clients_write_ops" topClientReadDataMatrix = "volume_top_clients_read_data" topClientWriteDataMatrix = "volume_top_clients_write_data" + topFileReadOPSMatrix = "volume_top_files_read_ops" + topFileWriteOPSMatrix = "volume_top_files_write_ops" + topFileReadDataMatrix = "volume_top_files_read_data" + topFileWriteDataMatrix = "volume_top_files_write_data" defaultTopN = 5 maxTopN = 50 ) @@ -38,14 +44,27 @@ var opMetric = "ops" var dataMetric = "data" -type TopClients struct { +type TopMetrics struct { *plugin.AbstractPlugin - schedule int - client *rest.Client - data map[string]*matrix.Matrix - cache *VolumeCache - maxVolumeCount int - tracker VolumeTracker + schedule int + client *rest.Client + data map[string]*matrix.Matrix + cache *VolumeCache + maxVolumeCount int + tracker VolumeTracker + clientMetricsEnabled bool + fileMetricsEnabled bool +} + +type TopMetricsData struct { + readOpsVolumes *set.Set + readOpsSvms *set.Set + writeOpsVolumes *set.Set + writeOpsSvms *set.Set + readDataVolumes *set.Set + readDataSvms *set.Set + writeDataVolumes *set.Set + writeDataSvms *set.Set } type VolumeCache struct { @@ -59,10 +78,10 @@ type MetricValue struct { } func New(p *plugin.AbstractPlugin) plugin.Plugin { - return &TopClients{AbstractPlugin: p} + return &TopMetrics{AbstractPlugin: p} } -func (t *TopClients) InitAllMatrix() error { +func (t *TopMetrics) InitAllMatrix() error { t.data = make(map[string]*matrix.Matrix) mats := []struct { name string @@ -73,6 +92,10 @@ func (t *TopClients) InitAllMatrix() error { {topClientWriteOPSMatrix, "volume_top_clients_write", opMetric}, {topClientReadDataMatrix, "volume_top_clients_read", dataMetric}, {topClientWriteDataMatrix, "volume_top_clients_write", dataMetric}, + {topFileReadOPSMatrix, "volume_top_files_read", opMetric}, + {topFileWriteOPSMatrix, "volume_top_files_write", opMetric}, + {topFileReadDataMatrix, "volume_top_files_read", dataMetric}, + {topFileWriteDataMatrix, "volume_top_files_write", dataMetric}, } for _, m := range mats { @@ -83,7 +106,7 @@ func (t *TopClients) InitAllMatrix() error { return nil } -func (t *TopClients) initMatrix(name string, object string, inputMat map[string]*matrix.Matrix, metric string) error { +func (t *TopMetrics) initMatrix(name string, object string, inputMat map[string]*matrix.Matrix, metric string) error { matrixName := t.Parent + name inputMat[name] = matrix.New(matrixName, object, name) for _, v1 := range t.data { @@ -97,7 +120,7 @@ func (t *TopClients) initMatrix(name string, object string, inputMat map[string] return nil } -func (t *TopClients) Init(remote conf.Remote) error { +func (t *TopMetrics) Init(remote conf.Remote) error { var err error if err := t.InitAbc(); err != nil { return err @@ -125,12 +148,31 @@ func (t *TopClients) Init(remote conf.Remote) error { t.maxVolumeCount 
= min(maxVolCount, maxTopN) } } + + // enable client and file metrics collection by default + t.clientMetricsEnabled = true + t.fileMetricsEnabled = true + + if objects := t.Params.GetChildS("objects"); objects != nil { + o := objects.GetAllChildContentS() + if !slices.Contains(o, "client") { + t.clientMetricsEnabled = false + } + if !slices.Contains(o, "file") { + t.fileMetricsEnabled = false + } + } t.schedule = t.SetPluginInterval() t.SLogger.Info("Using", slog.Int("maxVolumeCount", t.maxVolumeCount)) return nil } -func (t *TopClients) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util.Metadata, error) { +func (t *TopMetrics) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util.Metadata, error) { + // if both client and file metrics are disabled then return + if !t.clientMetricsEnabled && !t.fileMetricsEnabled { + return nil, nil, nil + } + data := dataMap[t.Object] t.client.Metadata.Reset() err := t.InitAllMatrix() @@ -141,10 +183,25 @@ func (t *TopClients) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, * // Set all global labels if already not exist t.data[k].SetGlobalLabels(data.GetGlobalLabels()) } - err = t.processTopClients(data) + + metricsData, err := t.processTopMetrics(data) if err != nil { return nil, nil, err } + + if t.clientMetricsEnabled { + err = t.processTopClients(metricsData) + if err != nil { + return nil, nil, err + } + } + if t.fileMetricsEnabled { + err = t.processTopFiles(metricsData) + if err != nil { + return nil, nil, err + } + } + result := make([]*matrix.Matrix, 0, len(t.data)) var pluginInstances uint64 @@ -154,14 +211,14 @@ func (t *TopClients) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, * continue } result = append(result, value) - pluginInstances = +uint64(len(value.GetInstances())) + pluginInstances += uint64(len(value.GetInstances())) } t.client.Metadata.PluginInstances = pluginInstances return result, t.client.Metadata, err } -func (t *TopClients) getCachedVolumesWithActivityTracking() (*set.Set, error) { +func (t *TopMetrics) getCachedVolumesWithActivityTracking() (*set.Set, error) { var ( va *set.Set @@ -186,14 +243,54 @@ func (t *TopClients) getCachedVolumesWithActivityTracking() (*set.Set, error) { return va, nil } -func (t *TopClients) processTopClients(data *matrix.Matrix) error { +func (t *TopMetrics) processTopClients(metricsData *TopMetricsData) error { + if err := t.processTopClientsByMetric(metricsData.readOpsVolumes, metricsData.readOpsSvms, topClientReadOPSMatrix, "iops.read", opMetric); err != nil { + return err + } + + if err := t.processTopClientsByMetric(metricsData.writeOpsVolumes, metricsData.writeOpsSvms, topClientWriteOPSMatrix, "iops.write", opMetric); err != nil { + return err + } + + if err := t.processTopClientsByMetric(metricsData.readDataVolumes, metricsData.readDataSvms, topClientReadDataMatrix, "throughput.read", dataMetric); err != nil { + return err + } + + if err := t.processTopClientsByMetric(metricsData.writeDataVolumes, metricsData.writeDataSvms, topClientWriteDataMatrix, "throughput.write", dataMetric); err != nil { + return err + } + + return nil +} + +func (t *TopMetrics) processTopFiles(metricsData *TopMetricsData) error { + if err := t.processTopFilesByMetric(metricsData.readOpsVolumes, metricsData.readOpsSvms, topFileReadOPSMatrix, "iops.read", opMetric); err != nil { + return err + } + + if err := t.processTopFilesByMetric(metricsData.writeOpsVolumes, metricsData.writeOpsSvms, topFileWriteOPSMatrix, "iops.write", opMetric); err != nil { + return err + } + + if 
err := t.processTopFilesByMetric(metricsData.readDataVolumes, metricsData.readDataSvms, topFileReadDataMatrix, "throughput.read", dataMetric); err != nil { + return err + } + + if err := t.processTopFilesByMetric(metricsData.writeDataVolumes, metricsData.writeDataSvms, topFileWriteDataMatrix, "throughput.write", dataMetric); err != nil { + return err + } + + return nil +} + +func (t *TopMetrics) processTopMetrics(data *matrix.Matrix) (*TopMetricsData, error) { va, err := t.getCachedVolumesWithActivityTracking() if err != nil { - return err + return nil, err } if va.Size() == 0 { - return nil + return nil, nil } filteredDataInstances := set.New() @@ -217,26 +314,19 @@ func (t *TopClients) processTopClients(data *matrix.Matrix) error { readDataVolumes, readDataSvms := t.extractVolumesAndSvms(data, topReadData) writeDataVolumes, writeDataSvms := t.extractVolumesAndSvms(data, topWriteData) - if err := t.processTopClientsByMetric(readOpsVolumes, readOpsSvms, topClientReadOPSMatrix, "iops.read", opMetric); err != nil { - return err - } - - if err := t.processTopClientsByMetric(writeOpsVolumes, writeOpsSvms, topClientWriteOPSMatrix, "iops.write", opMetric); err != nil { - return err - } - - if err := t.processTopClientsByMetric(readDataVolumes, readDataSvms, topClientReadDataMatrix, "throughput.read", dataMetric); err != nil { - return err - } - - if err := t.processTopClientsByMetric(writeDataVolumes, writeDataSvms, topClientWriteDataMatrix, "throughput.write", dataMetric); err != nil { - return err - } - - return nil + return &TopMetricsData{ + readOpsVolumes: readOpsVolumes, + readOpsSvms: readOpsSvms, + writeOpsVolumes: writeOpsVolumes, + writeOpsSvms: writeOpsSvms, + readDataVolumes: readDataVolumes, + readDataSvms: readDataSvms, + writeDataVolumes: writeDataVolumes, + writeDataSvms: writeDataSvms, + }, nil } -func (t *TopClients) collectMetricValues(data *matrix.Matrix, filteredDataInstances *set.Set) ([]MetricValue, []MetricValue, []MetricValue, []MetricValue) { +func (t *TopMetrics) collectMetricValues(data *matrix.Matrix, filteredDataInstances *set.Set) ([]MetricValue, []MetricValue, []MetricValue, []MetricValue) { readOpsMetric := data.GetMetric("total_read_ops") writeOpsMetric := data.GetMetric("total_write_ops") readDataMetric := data.GetMetric("bytes_read") @@ -277,7 +367,7 @@ func (t *TopClients) collectMetricValues(data *matrix.Matrix, filteredDataInstan return readOpsList, writeOpsList, readDataList, writeDataList } -func (t *TopClients) getTopN(metricList []MetricValue) []MetricValue { +func (t *TopMetrics) getTopN(metricList []MetricValue) []MetricValue { slices.SortFunc(metricList, func(a, b MetricValue) int { return cmp.Compare(b.value, a.value) // Sort in descending order }) @@ -288,7 +378,7 @@ func (t *TopClients) getTopN(metricList []MetricValue) []MetricValue { return metricList } -func (t *TopClients) extractVolumesAndSvms(data *matrix.Matrix, topMetrics []MetricValue) (*set.Set, *set.Set) { +func (t *TopMetrics) extractVolumesAndSvms(data *matrix.Matrix, topMetrics []MetricValue) (*set.Set, *set.Set) { volumes := set.New() svms := set.New() for _, item := range topMetrics { @@ -303,7 +393,40 @@ func (t *TopClients) extractVolumesAndSvms(data *matrix.Matrix, topMetrics []Met return volumes, svms } -func (t *TopClients) processTopClientsByMetric(volumes, svms *set.Set, matrixName, metric, metricType string) error { +func (t *TopMetrics) processTopFilesByMetric(volumes, svms *set.Set, matrixName, metric, metricType string) error { + if svms.Size() == 0 || volumes.Size() == 
0 { + return nil + } + + topFiles, err := t.fetchTopFiles(volumes, svms, metric) + if err != nil { + return err + } + + mat := t.data[matrixName] + if mat == nil { + return nil + } + for _, client := range topFiles { + path := client.Get("path").String() + vol := client.Get("volume.name").String() + svm := client.Get("svm.name").String() + value := client.Get(metric).Float() + instanceKey := path + keyToken + vol + keyToken + svm + instance, err := mat.NewInstance(instanceKey) + if err != nil { + t.SLogger.Warn("error while creating instance", slogx.Err(err), slog.String("volume", vol)) + continue + } + instance.SetLabel("volume", vol) + instance.SetLabel("svm", svm) + instance.SetLabel("path", path) + t.setMetric(mat, instance, value, metricType) + } + return nil +} + +func (t *TopMetrics) processTopClientsByMetric(volumes, svms *set.Set, matrixName, metric, metricType string) error { if svms.Size() == 0 || volumes.Size() == 0 { return nil } @@ -336,7 +459,7 @@ func (t *TopClients) processTopClientsByMetric(volumes, svms *set.Set, matrixNam return nil } -func (t *TopClients) setMetric(mat *matrix.Matrix, instance *matrix.Instance, value float64, metricType string) { +func (t *TopMetrics) setMetric(mat *matrix.Matrix, instance *matrix.Instance, value float64, metricType string) { var err error m := mat.GetMetric(metricType) if m == nil { @@ -350,7 +473,7 @@ func (t *TopClients) setMetric(mat *matrix.Matrix, instance *matrix.Instance, va } } -func (t *TopClients) fetchVolumesWithActivityTrackingEnabled() (*set.Set, error) { +func (t *TopMetrics) fetchVolumesWithActivityTrackingEnabled() (*set.Set, error) { var ( result []gjson.Result err error @@ -379,7 +502,7 @@ func (t *TopClients) fetchVolumesWithActivityTrackingEnabled() (*set.Set, error) return va, nil } -func (t *TopClients) fetchTopClients(volumes *set.Set, svms *set.Set, metric string) ([]gjson.Result, error) { +func (t *TopMetrics) fetchTopClients(volumes *set.Set, svms *set.Set, metric string) ([]gjson.Result, error) { var ( result []gjson.Result err error @@ -401,3 +524,26 @@ func (t *TopClients) fetchTopClients(volumes *set.Set, svms *set.Set, metric str return result, nil } + +func (t *TopMetrics) fetchTopFiles(volumes *set.Set, svms *set.Set, metric string) ([]gjson.Result, error) { + var ( + result []gjson.Result + err error + ) + if t.tracker != nil { + return t.tracker.fetchTopFiles(volumes, svms, metric) + } + query := "api/storage/volumes/*/top-metrics/files" + href := rest.NewHrefBuilder(). + APIPath(query). + Fields([]string{"path", "svm", "volume.name", metric}). + MaxRecords(collectors.DefaultBatchSize). + Filter([]string{"top_metric=" + metric, "volume=" + strings.Join(volumes.Values(), "|"), "svm=" + strings.Join(svms.Values(), "|")}). 
+ Build() + + if result, err = collectors.InvokeRestCall(t.client, href); err != nil { + return result, err + } + + return result, nil +} diff --git a/cmd/collectors/restperf/plugins/volumetopclients/volumetopclients_test.go b/cmd/collectors/restperf/plugins/volumetopmetrics/volumetopmetrics_test.go similarity index 56% rename from cmd/collectors/restperf/plugins/volumetopclients/volumetopclients_test.go rename to cmd/collectors/restperf/plugins/volumetopmetrics/volumetopmetrics_test.go index a8ff51af3..4ff50b5f5 100644 --- a/cmd/collectors/restperf/plugins/volumetopclients/volumetopclients_test.go +++ b/cmd/collectors/restperf/plugins/volumetopmetrics/volumetopmetrics_test.go @@ -1,4 +1,4 @@ -package volumetopclients +package volumetopmetrics import ( "testing" @@ -13,7 +13,7 @@ import ( var globalDataMatrix *matrix.Matrix type MockVolume struct { - *TopClients + *TopMetrics testFilePath string } @@ -21,6 +21,10 @@ func (mv *MockVolume) fetchTopClients(_ *set.Set, _ *set.Set, _ string) ([]gjson return collectors.InvokeRestCallWithTestFile(nil, "", mv.testFilePath) } +func (mv *MockVolume) fetchTopFiles(_ *set.Set, _ *set.Set, _ string) ([]gjson.Result, error) { + return collectors.InvokeRestCallWithTestFile(nil, "", mv.testFilePath) +} + func (mv *MockVolume) fetchVolumesWithActivityTrackingEnabled() (*set.Set, error) { va := set.New() va.Add("osc" + keyToken + "osc_vol01") @@ -29,9 +33,9 @@ func (mv *MockVolume) fetchVolumesWithActivityTrackingEnabled() (*set.Set, error } func NewMockVolume(p *plugin.AbstractPlugin, testFilePath string) *MockVolume { - v := &TopClients{AbstractPlugin: p} + v := &TopMetrics{AbstractPlugin: p} mockVolume := &MockVolume{ - TopClients: v, + TopMetrics: v, testFilePath: testFilePath, } mockVolume.tracker = mockVolume @@ -78,10 +82,58 @@ func TestProcessTopClients(t *testing.T) { testFilePath string expectedCount int }{ - {"Read Ops", "iops.read", topClientReadOPSMatrix, "testdata/readops.json", 1}, - {"Write Ops", "iops.write", topClientWriteOPSMatrix, "testdata/writeops.json", 4}, - {"Read Data", "throughput.read", topClientReadDataMatrix, "testdata/readdata.json", 1}, - {"Write Data", "throughput.write", topClientWriteDataMatrix, "testdata/writedata.json", 3}, + {"Client Read Ops", "iops.read", topClientReadOPSMatrix, "testdata/client_readops.json", 1}, + {"Client Write Ops", "iops.write", topClientWriteOPSMatrix, "testdata/client_writeops.json", 4}, + {"Client Read Data", "throughput.read", topClientReadDataMatrix, "testdata/client_readdata.json", 1}, + {"Client Write Data", "throughput.write", topClientWriteDataMatrix, "testdata/client_writedata.json", 3}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockVolume := NewMockVolume(&plugin.AbstractPlugin{}, tc.testFilePath) + mockVolume.maxVolumeCount = 5 + + err := mockVolume.InitAllMatrix() + if err != nil { + t.Errorf("InitAllMatrix should not return an error: %v", err) + } + + data := globalDataMatrix + + metrics, err := mockVolume.processTopMetrics(data) + if err != nil { + return + } + + err = mockVolume.processTopClients(metrics) + if err != nil { + t.Errorf("processTopClients should not return an error: %v", err) + } + + resultMatrix := mockVolume.data[tc.matrixName] + + if resultMatrix == nil { + t.Errorf("%s Matrix should be initialized", tc.matrixName) + } + if len(resultMatrix.GetInstances()) != tc.expectedCount { + t.Errorf("%s Matrix should have %d instance(s), got %d", tc.matrixName, tc.expectedCount, len(resultMatrix.GetInstances())) + } + }) + } +} + +func 
TestProcessTopFiles(t *testing.T) { + testCases := []struct { + name string + metric string + matrixName string + testFilePath string + expectedCount int + }{ + {"File Read Ops", "iops.read", topFileReadOPSMatrix, "testdata/file_readops.json", 1}, + {"File Write Ops", "iops.write", topFileWriteOPSMatrix, "testdata/file_writeops.json", 6}, + {"File Read Data", "throughput.read", topFileReadDataMatrix, "testdata/file_readdata.json", 1}, + {"File Write Data", "throughput.write", topFileWriteDataMatrix, "testdata/file_writedata.json", 1}, } for _, tc := range testCases { @@ -96,7 +148,12 @@ func TestProcessTopClients(t *testing.T) { data := globalDataMatrix - err = mockVolume.processTopClients(data) + metrics, err := mockVolume.processTopMetrics(data) + if err != nil { + return + } + + err = mockVolume.processTopFiles(metrics) if err != nil { t.Errorf("processTopClients should not return an error: %v", err) } diff --git a/cmd/collectors/restperf/restperf.go b/cmd/collectors/restperf/restperf.go index cfb3e056b..c3a894e6e 100644 --- a/cmd/collectors/restperf/restperf.go +++ b/cmd/collectors/restperf/restperf.go @@ -13,7 +13,7 @@ import ( "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/nic" "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/volume" "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/volumetag" - "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/volumetopclients" + "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/volumetopmetrics" "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/vscan" "github.com/netapp/harvest/v2/cmd/poller/collector" "github.com/netapp/harvest/v2/cmd/poller/plugin" @@ -1463,7 +1463,7 @@ func (r *RestPerf) LoadPlugin(kind string, p *plugin.AbstractPlugin) plugin.Plug case "VolumeTag": return volumetag.New(p) case "VolumeTopClients": - return volumetopclients.New(p) + return volumetopmetrics.New(p) case "Disk": return disk.New(p) case "Vscan": diff --git a/conf/restperf/9.12.0/volume.yaml b/conf/restperf/9.12.0/volume.yaml index 64462718e..6c8628976 100644 --- a/conf/restperf/9.12.0/volume.yaml +++ b/conf/restperf/9.12.0/volume.yaml @@ -49,12 +49,14 @@ plugins: include_constituents: false # - VolumeTopClients: -# # The maximum number of volumes to consider for top client metrics. -# # The actual maximum value is capped at 50, even if a higher number is specified. +# # `max_volumes` is the maximum number of volumes to consider for top client metrics. This value is capped at 50, even if a higher number is specified. # # When enabled, this plugin will collect read/write operations and throughput metrics for the top clients of each volume. # - schedule: # - data: 1h # This value should be a multiple of the poll duration. By default, Harvest will check once an hour to see how many volumes have activity_tracking.state set to on. # - max_volumes: 5 +# - objects: +# - client # collect read/write operations and throughput metrics for the top clients. 
+# - file # collect read/write operations and throughput metrics for the top files. # - LabelAgent: # # To prevent visibility of transient volumes, uncomment the following lines diff --git a/docs/plugins.md b/docs/plugins.md index c27ba96a2..c90d8250a 100644 --- a/docs/plugins.md +++ b/docs/plugins.md @@ -761,11 +761,11 @@ You can view the metrics published by the ChangeLog plugin in the `ChangeLog Mon # VolumeTopClients -The `VolumeTopClients` plugin is used to track a volume's top clients for volumes in terms of read and write IOPS, as well as read and write throughput. This plugin is available only through the RestPerf Collector in ONTAP version 9.12 and later. +The `VolumeTopClients` plugin is used to track a volume's top clients and top files in terms of read and write IOPS, as well as read and write throughput. This plugin is available only through the RestPerf Collector in ONTAP version 9.12 and later. ## Enabling the Plugin -Top Clients collection is disabled by default. To enable Top Clients tracking in Harvest, follow these steps: +Top Clients and Files collection is disabled by default. To enable Top Clients and Files tracking in Harvest, follow these steps: 1. Ensure you are using ONTAP version 9.12 or later. 2. Enable the Top Clients collection in the RestPerf Collector Volume template via the `VolumeTopClients` plugin. @@ -783,8 +783,20 @@ The plugin will select the top volumes based on the descending order of read IOP 1. Collect the read IOPS, write IOPS, read throughput, and write throughput for all volumes. 2. Sort the volumes in descending order based on their metric values. 3. Select the top volumes as specified by `max_volumes`. -4. Collect top clients metrics for these volumes. +4. Collect top client and top file metrics for these volumes. + +### `objects` + +The `objects` parameter allows you to specify whether top metrics are collected for clients, files, or both. You can customize the collection by including or excluding specific objects: + +```yaml +- objects: + - client # collect read/write operations and throughput metrics for the top clients. + - file # collect read/write operations and throughput metrics for the top files. +``` + +If the `objects` parameter is not defined, both client and file data will be collected by default. ## Viewing the Metrics -You can view the metrics published by the `VolumeTopClients` plugin in the `Volume` dashboard under the `Top Clients` row in Grafana. \ No newline at end of file +You can view the metrics published by the `VolumeTopClients` plugin in the `Volume` dashboard under the `Clients` and `Files` rows in Grafana. \ No newline at end of file
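The volume-selection steps described in the docs above (collect the counters, sort in descending order, keep the top `max_volumes`) reduce to a sort-and-truncate, which is the same pattern the plugin's `getTopN` helper in this diff uses. Below is a minimal, self-contained sketch of that pattern; the `volMetric` type and `topN` function are illustrative names, not the plugin's own identifiers.

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

// volMetric pairs a volume name with one of its performance counters
// (for example total_read_ops). Hypothetical type, for illustration only.
type volMetric struct {
	name  string
	value float64
}

// topN sorts the list in descending order of value and keeps at most n
// entries, mirroring the "sort, then cap at max_volumes" selection step.
func topN(list []volMetric, n int) []volMetric {
	slices.SortFunc(list, func(a, b volMetric) int {
		return cmp.Compare(b.value, a.value) // descending order
	})
	if len(list) > n {
		list = list[:n]
	}
	return list
}

func main() {
	vols := []volMetric{
		{"vol_a", 120}, {"vol_b", 950}, {"vol_c", 430},
	}
	fmt.Println(topN(vols, 2)) // [{vol_b 950} {vol_c 430}]
}
```

The plugin applies this selection independently to read ops, write ops, read throughput, and write throughput before querying the ONTAP top-metrics clients and files endpoints for the selected volumes and SVMs.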
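For readers enabling the feature, here is a sketch of how the `VolumeTopClients` block could look once uncommented in `conf/restperf/9.12.0/volume.yaml`, assuming the defaults shown in the commented template above (hourly schedule, `max_volumes: 5`, both objects enabled); the exact indentation should follow your copy of the template.

```yaml
plugins:
  - VolumeTopClients:
      - schedule:
          - data: 1h      # multiple of the poll duration; re-checks which volumes have activity_tracking.state set to on
      - max_volumes: 5    # capped at 50 even if a higher value is specified
      - objects:
          - client        # top clients: read/write ops and throughput
          - file          # top files: read/write ops and throughput
```

Omitting `client` or `file` from `objects` disables that collection; if both are omitted, the plugin collects nothing, matching the early return in `Run` shown in this diff.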