From 450b088faf78de02b1cf4613d2db49be910b260b Mon Sep 17 00:00:00 2001 From: Stefan Kurek Date: Fri, 28 Jun 2024 11:36:45 -0400 Subject: [PATCH 1/9] [chore] receiver/vcenter - Updates govmomi to v0.37.3 and fixes unit tests (#33721) **Description:** Updates govmomi library for `vcenterreceiver` to v0.37.3. This makes use of the `RetrievePropertiesEx` function as `RetrieveProperties` is deprecated. All of the related unit tests had to be redone to consider this. There are no customer facing changes with this. **Link to tracking Issue:** #32453 **Testing:** Unit tests/integration tests pass. Manual check against live environment. **Documentation:** No customer facing changes. --- cmd/otelcontribcol/go.mod | 2 +- cmd/otelcontribcol/go.sum | 4 +- receiver/vcenterreceiver/go.mod | 2 +- receiver/vcenterreceiver/go.sum | 4 +- .../internal/mockserver/client_mock.go | 23 +- .../mockserver/responses/cluster-children.xml | 28 +- .../mockserver/responses/compute-children.xml | 28 +- .../responses/compute-default-properties.xml | 218 +- .../responses/datacenter-folder.xml | 19 + .../responses/datacenter-properties.xml | 48 +- .../mockserver/responses/datacenter.xml | 16 +- .../datastore-default-properties.xml | 30 +- .../responses/host-default-properties.xml | 152 +- .../responses/host-folder-children.xml | 44 +- .../responses/host-folder-parent.xml | 56 +- .../mockserver/responses/perf-manager.xml | 24966 ++++++++-------- .../resource-pool-default-properties.xml | 530 +- .../responses/retrieve-properties-empty.xml | 2 +- .../responses/vm-default-properties.xml | 524 +- .../responses/vm-folder-children.xml | 96 +- .../responses/vm-folder-parents.xml | 56 +- 21 files changed, 13471 insertions(+), 13377 deletions(-) create mode 100644 receiver/vcenterreceiver/internal/mockserver/responses/datacenter-folder.xml diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 85b95a904e88..db2cf0c9fd17 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -736,7 +736,7 @@ require ( github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 // indirect github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect github.com/vmware/go-vmware-nsxt v0.0.0-20230223012718-d31b8a1ca05e // indirect - github.com/vmware/govmomi v0.36.3 // indirect + github.com/vmware/govmomi v0.37.3 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index 7b58e0eff920..9c18546ab5ef 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -2265,8 +2265,8 @@ github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vb github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmware/go-vmware-nsxt v0.0.0-20230223012718-d31b8a1ca05e h1:Vu41Q0Pv3yMdd+tcDW6QeEUIK2L+9ZrPrq8NAMrKSLc= github.com/vmware/go-vmware-nsxt v0.0.0-20230223012718-d31b8a1ca05e/go.mod h1:aRq5pxwgdJpAuP97SCjX1+Db32z/b0dggQ07FDF+fqE= -github.com/vmware/govmomi v0.36.3 h1:1Ng3CBNQVbFjCQbKtfsewy5o3dFa+EoTjqeThVISUBc= -github.com/vmware/govmomi v0.36.3/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= +github.com/vmware/govmomi v0.37.3 h1:L2y2Ba09tYiZwdPtdF64Ox9QZeJ8vlCUGcAF9SdODn4= +github.com/vmware/govmomi v0.37.3/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= 
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= diff --git a/receiver/vcenterreceiver/go.mod b/receiver/vcenterreceiver/go.mod index e5f3f4f34a13..fc47ba574a0e 100644 --- a/receiver/vcenterreceiver/go.mod +++ b/receiver/vcenterreceiver/go.mod @@ -9,7 +9,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.103.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.103.0 github.com/stretchr/testify v1.9.0 - github.com/vmware/govmomi v0.36.3 + github.com/vmware/govmomi v0.37.3 go.opentelemetry.io/collector/component v0.103.0 go.opentelemetry.io/collector/config/configopaque v1.10.0 go.opentelemetry.io/collector/config/configtls v0.103.0 diff --git a/receiver/vcenterreceiver/go.sum b/receiver/vcenterreceiver/go.sum index 74ba8e162e2b..14d7895435c6 100644 --- a/receiver/vcenterreceiver/go.sum +++ b/receiver/vcenterreceiver/go.sum @@ -140,8 +140,8 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/vmware/govmomi v0.36.3 h1:1Ng3CBNQVbFjCQbKtfsewy5o3dFa+EoTjqeThVISUBc= -github.com/vmware/govmomi v0.36.3/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= +github.com/vmware/govmomi v0.37.3 h1:L2y2Ba09tYiZwdPtdF64Ox9QZeJ8vlCUGcAF9SdODn4= +github.com/vmware/govmomi v0.37.3/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= diff --git a/receiver/vcenterreceiver/internal/mockserver/client_mock.go b/receiver/vcenterreceiver/internal/mockserver/client_mock.go index d08e6a1c31d7..467dfc21a78d 100644 --- a/receiver/vcenterreceiver/internal/mockserver/client_mock.go +++ b/receiver/vcenterreceiver/internal/mockserver/client_mock.go @@ -75,8 +75,8 @@ func routeBody(t *testing.T, requestType string, body map[string]any) ([]byte, e return loadResponse("login.xml") case "Logout": return loadResponse("logout.xml") - case "RetrieveProperties": - return routeRetreiveProperties(t, body) + case "RetrievePropertiesEx": + return routeRetreivePropertiesEx(t, body) case "QueryPerf": return routePerformanceQuery(t, body) case "CreateContainerView": @@ -86,8 +86,8 @@ func routeBody(t *testing.T, requestType string, body map[string]any) ([]byte, e return []byte{}, errNotFound } -func routeRetreiveProperties(t *testing.T, body map[string]any) ([]byte, error) { - rp, ok := body["RetrieveProperties"].(map[string]any) +func routeRetreivePropertiesEx(t *testing.T, body map[string]any) ([]byte, error) { + rp, ok := body["RetrievePropertiesEx"].(map[string]any) require.True(t, ok) specSet := rp["specSet"].(map[string]any) @@ -108,13 +108,24 @@ func routeRetreiveProperties(t *testing.T, body map[string]any) ([]byte, error) var contentType string if !objectSetArray { obj = objectSet["obj"].(map[string]any) - content = obj["#content"].(string) + if value, exists := obj["#content"]; exists { + content = value.(string) + } else { + content = "" + } contentType = obj["-type"].(string) } 
switch { case content == "group-d1" && contentType == "Folder": - return loadResponse("datacenter.xml") + for _, i := range propSetArray { + m, ok := i.(map[string]any) + require.True(t, ok) + if m["type"] == "Folder" { + return loadResponse("datacenter.xml") + } + } + return loadResponse("datacenter-folder.xml") case content == "datacenter-3" && contentType == "Datacenter": return loadResponse("datacenter-properties.xml") diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/cluster-children.xml b/receiver/vcenterreceiver/internal/mockserver/responses/cluster-children.xml index 9fd8f9a297fc..067351c48a1e 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/cluster-children.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/cluster-children.xml @@ -1,21 +1,25 @@ - + - resgroup-9 - - name - Resources - + + resgroup-9 + + name + Resources + + - host-1002 - - name - esxi-27971.cf5e88ac.australia-southeast1.gve.goog - + + host-1002 + + name + esxi-27971.cf5e88ac.australia-southeast1.gve.goog + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/compute-children.xml b/receiver/vcenterreceiver/internal/mockserver/responses/compute-children.xml index b8560dc6d6e3..367f3ea3ad54 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/compute-children.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/compute-children.xml @@ -1,21 +1,25 @@ - + - resgroup-10 - - name - Resources - + + resgroup-10 + + name + Resources + + - host-1003 - - name - esxi-111.europe-southeast1.gve.goog - + + host-1003 + + name + esxi-111.europe-southeast1.gve.goog + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/compute-default-properties.xml b/receiver/vcenterreceiver/internal/mockserver/responses/compute-default-properties.xml index 53e905d1a8e3..289706c6b1a2 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/compute-default-properties.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/compute-default-properties.xml @@ -1,116 +1,120 @@ - + - domain-c8 - - name - Cluster - - - datastore - - datastore-1003 - - - - host - - host-1002 - - - - summary - - 280044 - 2468289376256 - 108 - 216 - 252846 - 2140347 - 3 - 3 - green - 1 - - 92 - 91 - 100 - - 64 - 163 - 23 - 100 - 0 - 0 - 0 - 0 - 13 - - 252846 - 2140347 - 18000 - 180640 - 0 - 0 - 22075 - 181115 - 8349 - 14880 - 185343 - 0 - 13 - - intel-cascadelake - vsga-baseline - - 57 - 92 - 363 - - notInMaintenanceMode - healthy - - host-1002 - 1 - - - + + domain-c8 + + name + Cluster + + + datastore + + datastore-1003 + + + + host + + host-1002 + + + + summary + + 280044 + 2468289376256 + 108 + 216 + 252846 + 2140347 + 3 + 3 + green + 1 + + 92 + 91 + 100 + + 64 + 163 + 23 + 100 + 0 + 0 + 0 + 0 + 13 + + 252846 + 2140347 + 18000 + 180640 + 0 + 0 + 22075 + 181115 + 8349 + 14880 + 185343 + 0 + 13 + + intel-cascadelake + vsga-baseline + + 57 + 92 + 363 + + notInMaintenanceMode + healthy + + host-1002 + 1 + + + + - domain-c9 - - name - esxi-111.europe-southeast1.gve.goog - - - datastore - - datastore-1003 - - - - host - - host-1003 - - - - summary - - 252846 - 2140347 - 108 - 216 - 3 - 3 - 280044 - 2468289376256 - green - - + + domain-c9 + + name + esxi-111.europe-southeast1.gve.goog + + + datastore + + datastore-1003 + + + + host + + host-1003 + + + + summary + + 252846 + 2140347 + 108 + 216 + 3 + 3 + 280044 + 2468289376256 + green + + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/datacenter-folder.xml 
b/receiver/vcenterreceiver/internal/mockserver/responses/datacenter-folder.xml new file mode 100644 index 000000000000..34a23fb3afde --- /dev/null +++ b/receiver/vcenterreceiver/internal/mockserver/responses/datacenter-folder.xml @@ -0,0 +1,19 @@ + + + + + + + group-d1 + + name + Datacenters + + + + + + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/datacenter-properties.xml b/receiver/vcenterreceiver/internal/mockserver/responses/datacenter-properties.xml index 83eda304f6aa..588869556ff7 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/datacenter-properties.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/datacenter-properties.xml @@ -1,30 +1,32 @@ - + - datacenter-3 - - datastoreFolder - group-s6 - - - hostFolder - group-h5 - - - name - Datacenter - - - networkFolder - group-n7 - - - vmFolder - group-v4 - + + datacenter-3 + + datastoreFolder + group-s6 + + + hostFolder + group-h5 + + + name + Datacenter + + + networkFolder + group-n7 + + + vmFolder + group-v4 + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/datacenter.xml b/receiver/vcenterreceiver/internal/mockserver/responses/datacenter.xml index 624667a9170b..6789422edd85 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/datacenter.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/datacenter.xml @@ -4,14 +4,16 @@ xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> - + - datacenter-3 - - name - Datacenter - + + datacenter-3 + + name + Datacenter + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/datastore-default-properties.xml b/receiver/vcenterreceiver/internal/mockserver/responses/datastore-default-properties.xml index ad43d9e87c9b..6b5bf38db96a 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/datastore-default-properties.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/datastore-default-properties.xml @@ -1,22 +1,24 @@ - + + datastore-1003 - - name - vsanDatastore - - - summary.capacity - 57611315257344 - - - summary.freeSpace - 51693551508648 - + + name + vsanDatastore + + + summary.capacity + 57611315257344 + + + summary.freeSpace + 51693551508648 + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/host-default-properties.xml b/receiver/vcenterreceiver/internal/mockserver/responses/host-default-properties.xml index 0f9a08b0d43a..2f1ce3c9c456 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/host-default-properties.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/host-default-properties.xml @@ -1,83 +1,87 @@ - + - host-1002 - - name - esxi-27971.cf5e88ac.australia-southeast1.gve.goog - - - parent - domain-c8 - - - summary.hardware.cpuMhz - 2593 - - - summary.hardware.memorySize - 822763126784 - - - summary.hardware.numCpuCores - 36 - - - summary.quickStats.overallCpuUsage - 6107 - - - summary.quickStats.overallMemoryUsage - 140833 - - - vm - - vm-1040 - vm-6004 - vm-template - - + + host-1002 + + name + esxi-27971.cf5e88ac.australia-southeast1.gve.goog + + + parent + domain-c8 + + + summary.hardware.cpuMhz + 2593 + + + summary.hardware.memorySize + 822763126784 + + + summary.hardware.numCpuCores + 36 + + + summary.quickStats.overallCpuUsage + 6107 + + + summary.quickStats.overallMemoryUsage + 140833 + + + vm + + vm-1040 + vm-6004 + vm-template + + + - host-1003 - - name - 
esxi-111.europe-southeast1.gve.goog - - - parent - domain-c9 - - - summary.hardware.cpuMhz - 2593 - - - summary.hardware.memorySize - 822763126784 - - - summary.hardware.numCpuCores - 36 - - - summary.quickStats.overallCpuUsage - 6107 - - - summary.quickStats.overallMemoryUsage - 140833 - - - vm - - vm-6005 - - + + host-1003 + + name + esxi-111.europe-southeast1.gve.goog + + + parent + domain-c9 + + + summary.hardware.cpuMhz + 2593 + + + summary.hardware.memorySize + 822763126784 + + + summary.hardware.numCpuCores + 36 + + + summary.quickStats.overallCpuUsage + 6107 + + + summary.quickStats.overallMemoryUsage + 140833 + + + vm + + vm-6005 + + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/host-folder-children.xml b/receiver/vcenterreceiver/internal/mockserver/responses/host-folder-children.xml index b99300e45326..5084511a7833 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/host-folder-children.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/host-folder-children.xml @@ -1,29 +1,33 @@ - + - domain-c8 - - name - Cluster - - - resourcePool - resgroup-9 - + + domain-c8 + + name + Cluster + + + resourcePool + resgroup-9 + + - domain-c9 - - name - StandaloneHost - - - resourcePool - resgroup-10 - + + domain-c9 + + name + StandaloneHost + + + resourcePool + resgroup-10 + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/host-folder-parent.xml b/receiver/vcenterreceiver/internal/mockserver/responses/host-folder-parent.xml index 2bce16abf9cf..3e555ab0b9eb 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/host-folder-parent.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/host-folder-parent.xml @@ -1,36 +1,42 @@ - + - group-h5 - - name - host - - - parent - datacenter-3 - + + group-h5 + + name + host + + + parent + datacenter-3 + + - datacenter-3 - - name - Datacenter - - - parent - group-d1 - + + datacenter-3 + + name + Datacenter + + + parent + group-d1 + + - group-d1 - - name - Datacenters - + + group-d1 + + name + Datacenters + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/perf-manager.xml b/receiver/vcenterreceiver/internal/mockserver/responses/perf-manager.xml index 11e1934daccf..3da3b3745def 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/perf-manager.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/perf-manager.xml @@ -1,12489 +1,12491 @@ - + - PerfMgr - - perfCounter - - - 1 - - - CPU usage as a percentage during the interval - usage - - - - CPU - cpu - - - - Percentage - percent - - none - rate - 4 - 4 - - - 2 - - - CPU usage as a percentage during the interval - usage - - - - CPU - cpu - - - - Percentage - percent - - average - rate - 1 - 3 - - - 3 - - - CPU usage as a percentage during the interval - usage - - - - CPU - cpu - - - - Percentage - percent - - minimum - rate - 4 - 4 - - - 4 - - - CPU usage as a percentage during the interval - usage - - - - CPU - cpu - - - - Percentage - percent - - maximum - rate - 4 - 4 - - - 5 - - - CPU usage in megahertz during the interval - usagemhz - - - - CPU - cpu - - - - Megahertz - megaHertz - - none - rate - 4 - 4 - - - 6 - - - CPU usage in megahertz during the interval - usagemhz - - - - CPU - cpu - - - - Megahertz - megaHertz - - average - rate - 1 - 3 - - - 7 - - - CPU usage in megahertz during the interval - usagemhz - - - - CPU - cpu - - - - Megahertz - megaHertz - - minimum - rate - 4 - 4 - - - 8 - - - CPU usage in megahertz during the interval - usagemhz - - 
- - CPU - cpu - - - - Megahertz - megaHertz - - maximum - rate - 4 - 4 - - - 9 - - - Total CPU capacity reserved by virtual machines - reservedCapacity - - - - CPU - cpu - - - - Megahertz - megaHertz - - average - absolute - 2 - 3 - - - 10 - - - Amount of time spent on system processes on each virtual CPU in the virtual machine - system - - - - CPU - cpu - - - - Millisecond - millisecond - - summation - delta - 3 - 3 - - - 11 - - - Total CPU time spent in wait state - wait - - - - CPU - cpu - - - - Millisecond - millisecond - - summation - delta - 3 - 3 - - - 12 - - - Time that the virtual machine was ready, but could not get scheduled to run on the physical CPU during last measurement interval - ready - - - - CPU - cpu - - - - Millisecond - millisecond - - summation - delta - 1 - 3 - - - 13 - - - Total time that the CPU spent in an idle state - idle - - - - CPU - cpu - - - - Millisecond - millisecond - - summation - delta - 2 - 3 - - - 14 - - - Total CPU usage - used - - - - CPU - cpu - - - - Millisecond - millisecond - - summation - delta - 3 - 3 - - - 15 - - - Capacity in MHz of the physical CPU cores - capacity.provisioned - - - - CPU - cpu - - - - Megahertz - megaHertz - - average - absolute - 4 - 4 - - - 16 - - - CPU resources devoted by the ESXi scheduler to the virtual machines and resource pools - capacity.entitlement - - - - CPU - cpu - - - - Megahertz - megaHertz - - average - absolute - 4 - 4 - - - 17 - - - CPU usage as a percent during the interval - capacity.usage - - - - CPU - cpu - - - - Megahertz - megaHertz - - average - rate - 4 - 4 - - - 18 - - - The amount of CPU resources a VM would use if there were no CPU contention - capacity.demand - - - - CPU - cpu - - - - Megahertz - megaHertz - - average - absolute - 4 - 4 - - - 19 - - - Percent of time the VM is unable to run because it is contending for access to the physical CPU(s) - capacity.contention - - - - CPU - cpu - - - - Percentage - percent - - average - rate - 4 - 4 - - - 20 - - - The number of virtual processors provisioned to the entity - corecount.provisioned - - - - CPU - cpu - - - - Number - number - - average - absolute - 4 - 4 - - - 21 - - - The number of virtual processors running on the host - corecount.usage - - - - CPU - cpu - - - - Number - number - - average - absolute - 4 - 4 - - - 22 - - - Time the VM vCPU is ready to run, but is unable to run due to co-scheduling constraints - corecount.contention - - - - CPU - cpu - - - - Percentage - percent - - average - rate - 4 - 4 - - - 23 - - - Percentage of host physical memory that has been consumed - usage - - - - Memory - mem - - - - Percentage - percent - - none - absolute - 4 - 4 - - - 24 - - - Percentage of host physical memory that has been consumed - usage - - - - Memory - mem - - - - Percentage - percent - - average - absolute - 1 - 3 - - - 25 - - - Percentage of host physical memory that has been consumed - usage - - - - Memory - mem - - - - Percentage - percent - - minimum - absolute - 4 - 4 - - - 26 - - - Percentage of host physical memory that has been consumed - usage - - - - Memory - mem - - - - Percentage - percent - - maximum - absolute - 4 - 4 - - - 27 - - - Memory reservation consumed by powered-on virtual machines - reservedCapacity - - - - Memory - mem - - - - Megabyte - megaBytes - - average - absolute - 2 - 3 - - - 28 - - - Amount of host physical memory or physical memory that is mapped for a virtual machine or a host - granted - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 29 - - - Amount of host 
physical memory or physical memory that is mapped for a virtual machine or a host - granted - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 30 - - - Amount of host physical memory or physical memory that is mapped for a virtual machine or a host - granted - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 31 - - - Amount of host physical memory or physical memory that is mapped for a virtual machine or a host - granted - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 32 - - - Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi - active - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 33 - - - Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi - active - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 34 - - - Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi - active - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 35 - - - Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi - active - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 36 - - - Amount of guest physical memory that is shared within a single virtual machine or across virtual machines - shared - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 37 - - - Amount of guest physical memory that is shared within a single virtual machine or across virtual machines - shared - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 38 - - - Amount of guest physical memory that is shared within a single virtual machine or across virtual machines - shared - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 39 - - - Amount of guest physical memory that is shared within a single virtual machine or across virtual machines - shared - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 40 - - - Guest physical memory pages whose content is 0x00 - zero - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 41 - - - Guest physical memory pages whose content is 0x00 - zero - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 42 - - - Guest physical memory pages whose content is 0x00 - zero - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 43 - - - Guest physical memory pages whose content is 0x00 - zero - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 44 - - - Amount by which reservation can be raised - unreserved - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 45 - - - Amount by which reservation can be raised - unreserved - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 46 - - - Amount by which reservation can be raised - unreserved - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 47 - - - Amount by which reservation can be raised - unreserved - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 48 - - - Swap storage space consumed - swapused - 
- - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 49 - - - Swap storage space consumed - swapused - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 50 - - - Swap storage space consumed - swapused - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 51 - - - Swap storage space consumed - swapused - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 52 - - - swapunreserved - swapunreserved - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 53 - - - swapunreserved - swapunreserved - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 4 - 4 - - - 54 - - - swapunreserved - swapunreserved - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 55 - - - swapunreserved - swapunreserved - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 56 - - - Amount of host physical memory that backs shared guest physical memory (Shared) - sharedcommon - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 57 - - - Amount of host physical memory that backs shared guest physical memory (Shared) - sharedcommon - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 58 - - - Amount of host physical memory that backs shared guest physical memory (Shared) - sharedcommon - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 59 - - - Amount of host physical memory that backs shared guest physical memory (Shared) - sharedcommon - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 60 - - - Virtual address space of ESXi that is dedicated to its heap - heap - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 61 - - - Virtual address space of ESXi that is dedicated to its heap - heap - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 4 - 4 - - - 62 - - - Virtual address space of ESXi that is dedicated to its heap - heap - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 63 - - - Virtual address space of ESXi that is dedicated to its heap - heap - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 64 - - - Free address space in the heap of ESXi. This is less than or equal to Heap - heapfree - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 65 - - - Free address space in the heap of ESXi. This is less than or equal to Heap - heapfree - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 4 - 4 - - - 66 - - - Free address space in the heap of ESXi. This is less than or equal to Heap - heapfree - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 67 - - - Free address space in the heap of ESXi. This is less than or equal to Heap - heapfree - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 68 - - - Current memory availability state of ESXi. Possible values are high, clear, soft, hard, low. 
The state value determines the techniques used for memory reclamation from virtual machines - state - - - - Memory - mem - - - - Number - number - - latest - absolute - 2 - 3 - - - 69 - - - Amount of guest physical memory that is swapped out to the swap space - swapped - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 70 - - - Amount of guest physical memory that is swapped out to the swap space - swapped - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 71 - - - Amount of guest physical memory that is swapped out to the swap space - swapped - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 72 - - - Amount of guest physical memory that is swapped out to the swap space - swapped - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 73 - - - Amount of memory that ESXi needs to reclaim by swapping - swaptarget - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 74 - - - Amount of memory that ESXi needs to reclaim by swapping - swaptarget - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 75 - - - Amount of memory that ESXi needs to reclaim by swapping - swaptarget - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 76 - - - Amount of memory that ESXi needs to reclaim by swapping - swaptarget - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 77 - - - swapIn - swapIn - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 78 - - - swapIn - swapIn - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 79 - - - swapIn - swapIn - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 80 - - - swapIn - swapIn - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 81 - - - swapOut - swapOut - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 82 - - - swapOut - swapOut - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 83 - - - swapOut - swapOut - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 84 - - - swapOut - swapOut - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 85 - - - Rate at which guest physical memory is swapped in from the swap space - swapinRate - - - - Memory - mem - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 1 - 3 - - - 86 - - - Rate at which guest physical memory is swapped out to the swap space - swapoutRate - - - - Memory - mem - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 1 - 3 - - - 87 - - - Amount of memory that is swapped out for the Service Console - swapOut - - - - Management agent - managementAgent - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 3 - 3 - - - 88 - - - Amount of memory that is swapped in for the Service Console - swapIn - - - - Management agent - managementAgent - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 3 - 3 - - - 89 - - - Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest - vmmemctl - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 90 - - - Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the 
guest - vmmemctl - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 1 - 3 - - - 91 - - - Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest - vmmemctl - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 92 - - - Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest - vmmemctl - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 93 - - - Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi - vmmemctltarget - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 94 - - - Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi - vmmemctltarget - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 95 - - - Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi - vmmemctltarget - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 96 - - - Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi - vmmemctltarget - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 97 - - - Amount of host physical memory consumed for backing up guest physical memory pages - consumed - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 98 - - - Amount of host physical memory consumed for backing up guest physical memory pages - consumed - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 1 - 3 - - - 99 - - - Amount of host physical memory consumed for backing up guest physical memory pages - consumed - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 100 - - - Amount of host physical memory consumed for backing up guest physical memory pages - consumed - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 101 - - - Host physical memory consumed by ESXi data structures for running the virtual machines - overhead - - - - Memory - mem - - - - Kilobyte - kiloBytes - - none - absolute - 4 - 4 - - - 102 - - - Host physical memory consumed by ESXi data structures for running the virtual machines - overhead - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 1 - 1 - - - 103 - - - Host physical memory consumed by ESXi data structures for running the virtual machines - overhead - - - - Memory - mem - - - - Kilobyte - kiloBytes - - minimum - absolute - 4 - 4 - - - 104 - - - Host physical memory consumed by ESXi data structures for running the virtual machines - overhead - - - - Memory - mem - - - - Kilobyte - kiloBytes - - maximum - absolute - 4 - 4 - - - 105 - - - Guest physical memory pages that have undergone memory compression - compressed - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 3 - - - 106 - - - Rate of guest physical memory page compression by ESXi - compressionRate - - - - Memory - mem - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 3 - - - 107 - - - Rate of guest physical memory decompression - decompressionRate - - - - Memory - mem - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 3 - - - 108 - - - Total amount of memory available to the host - capacity.provisioned - - - - Memory - mem - - - - 
Kilobyte - kiloBytes - - average - absolute - 4 - 4 - - - 109 - - - Amount of host physical memory the VM is entitled to, as determined by the ESXi scheduler - capacity.entitlement - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 4 - 4 - - - 110 - - - Amount of physical memory available for use by virtual machines on this host - capacity.usable - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 4 - 4 - - - 111 - - - Amount of physical memory actively used - capacity.usage - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 4 - 4 - - - 112 - - - Percentage of time VMs are waiting to access swapped, compressed or ballooned memory - capacity.contention - - - - Memory - mem - - - - Percentage - percent - - average - rate - 4 - 4 - - - 113 - - - vm - capacity.usage.vm - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 114 - - - vmOvrhd - capacity.usage.vmOvrhd - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 115 - - - vmkOvrhd - capacity.usage.vmkOvrhd - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 116 - - - userworld - capacity.usage.userworld - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 117 - - - vm - reservedCapacity.vm - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 118 - - - vmOvhd - reservedCapacity.vmOvhd - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 119 - - - vmkOvrhd - reservedCapacity.vmkOvrhd - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 120 - - - userworld - reservedCapacity.userworld - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 121 - - - Percent of memory that has been reserved either through VMkernel use, by userworlds or due to VM memory reservations - reservedCapacityPct - - - - Memory - mem - - - - Percentage - percent - - average - absolute - 4 - 4 - - - 122 - - - Amount of physical memory consumed by VMs on this host - consumed.vms - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 123 - - - Amount of physical memory consumed by userworlds on this host - consumed.userworlds - - - - Memory - mem - - - - Kilobyte - kiloBytes - - average - absolute - 2 - 4 - - - 124 - - - Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval. - usage - - - - Disk - disk - - - - Kilobytes per second - kiloBytesPerSecond - - none - rate - 4 - 4 - - - 125 - - - Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval. - usage - - - - Disk - disk - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 1 - 3 - - - 126 - - - Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval. - usage - - - - Disk - disk - - - - Kilobytes per second - kiloBytesPerSecond - - minimum - rate - 4 - 4 - - - 127 - - - Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval. 
- usage - - - - Disk - disk - - - - Kilobytes per second - kiloBytesPerSecond - - maximum - rate - 4 - 4 - - - 128 - - - Number of disk reads during the collection interval - numberRead - - - - Disk - disk - - - - Number - number - - summation - delta - 3 - 3 - - - 129 - - - Number of disk writes during the collection interval - numberWrite - - - - Disk - disk - - - - Number - number - - summation - delta - 3 - 3 - - - 130 - - - Average number of kilobytes read from the disk each second during the collection interval - read - - - - Disk - disk - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 3 - - - 131 - - - Average number of kilobytes written to disk each second during the collection interval - write - - - - Disk - disk - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 3 - - - 132 - - - Average amount of time taken during the collection interval to process a SCSI command issued by the guest OS to the virtual machine - totalLatency - - - - Disk - disk - - - - Millisecond - millisecond - - average - absolute - 3 - 3 - - - 133 - - - Highest latency value across all disks used by the host - maxTotalLatency - - - - Disk - disk - - - - Millisecond - millisecond - - latest - absolute - 1 - 3 - - - 134 - - - Number of SCSI commands aborted during the collection interval - commandsAborted - - - - Disk - disk - - - - Number - number - - summation - delta - 2 - 3 - - - 135 - - - Number of SCSI-bus reset commands issued during the collection interval - busResets - - - - Disk - disk - - - - Number - number - - summation - delta - 2 - 3 - - - 136 - - - Average number of disk reads per second during the collection interval - numberReadAveraged - - - - Disk - disk - - - - Number - number - - average - rate - 1 - 3 - - - 137 - - - Average number of disk writes per second during the collection interval - numberWriteAveraged - - - - Disk - disk - - - - Number - number - - average - rate - 1 - 3 - - - 138 - - - Aggregated disk I/O rate, including the rates for all virtual machines running on the host during the collection interval - throughput.usage - - - - Disk - disk - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 4 - 4 - - - 139 - - - Average amount of time for an I/O operation to complete successfully - throughput.contention - - - - Disk - disk - - - - Millisecond - millisecond - - average - absolute - 4 - 4 - - - 140 - - - Number of SCSI reservation conflicts for the LUN during the collection interval - scsiReservationConflicts - - - - Disk - disk - - - - Number - number - - summation - delta - 2 - 2 - - - 141 - - - Number of SCSI reservation conflicts for the LUN as a percent of total commands during the collection interval - scsiReservationCnflctsPct - - - - Disk - disk - - - - Percentage - percent - - average - absolute - 4 - 4 - - - 142 - - - Network utilization (combined transmit-rates and receive-rates) during the interval - usage - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - none - rate - 4 - 4 - - - 143 - - - Network utilization (combined transmit-rates and receive-rates) during the interval - usage - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 1 - 3 - - - 144 - - - Network utilization (combined transmit-rates and receive-rates) during the interval - usage - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - minimum - rate - 4 - 4 - - - 145 - - - Network utilization (combined transmit-rates and receive-rates) during the 
interval - usage - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - maximum - rate - 4 - 4 - - - 146 - - - Number of packets received during the interval - packetsRx - - - - Network - net - - - - Number - number - - summation - delta - 2 - 3 - - - 147 - - - Number of packets transmitted during the interval - packetsTx - - - - Network - net - - - - Number - number - - summation - delta - 2 - 3 - - - 148 - - - Average rate at which data was received during the interval - received - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 3 - - - 149 - - - Average rate at which data was transmitted during the interval - transmitted - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 3 - - - 150 - - - The maximum network bandwidth for the host - throughput.provisioned - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - absolute - 4 - 4 - - - 151 - - - The current available network bandwidth for the host - throughput.usable - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - absolute - 4 - 4 - - - 152 - - - The current network bandwidth usage for the host - throughput.usage - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 4 - 4 - - - 153 - - - The aggregate network droppped packets for the host - throughput.contention - - - - Network - net - - - - Number - number - - summation - delta - 4 - 4 - - - 154 - - - Average rate of packets received and transmitted per second - throughput.packetsPerSec - - - - Network - net - - - - Number - number - - average - rate - 4 - 4 - - - 155 - - - Total time elapsed, in seconds, since last system startup - uptime - - - - System - sys - - - - Second - second - - latest - absolute - 1 - 3 - - - 156 - - - Number of heartbeats issued per virtual machine during the interval - heartbeat - - - - System - sys - - - - Number - number - - summation - delta - 1 - 3 - - - 157 - - - Current power usage - power - - - - Power - power - - - - Watt - watt - - average - rate - 2 - 3 - - - 158 - - - Maximum allowed power usage - powerCap - - - - Power - power - - - - Watt - watt - - average - absolute - 3 - 3 - - - 159 - - - Total energy used since last stats reset - energy - - - - Power - power - - - - Joule - joule - - summation - delta - 3 - 3 - - - 160 - - - Current power usage as a percentage of maximum allowed power - capacity.usagePct - - - - Power - power - - - - Percentage - percent - - average - absolute - 4 - 4 - - - 161 - - - Average number of commands issued per second by the storage adapter during the collection interval - commandsAveraged - - - - Storage adapter - storageAdapter - - - - Number - number - - average - rate - 2 - 2 - - - 162 - - - Average number of read commands issued per second by the storage adapter during the collection interval - numberReadAveraged - - - - Storage adapter - storageAdapter - - - - Number - number - - average - rate - 2 - 2 - - - 163 - - - Average number of write commands issued per second by the storage adapter during the collection interval - numberWriteAveraged - - - - Storage adapter - storageAdapter - - - - Number - number - - average - rate - 2 - 2 - - - 164 - - - Rate of reading data by the storage adapter - read - - - - Storage adapter - storageAdapter - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 2 - - - 165 - - - Rate of writing data by the storage adapter - write - - - - 
Storage adapter - storageAdapter - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 2 - - - 166 - - - The average time a read by the storage adapter takes - totalReadLatency - - - - Storage adapter - storageAdapter - - - - Millisecond - millisecond - - average - absolute - 2 - 2 - - - 167 - - - The average time a write by the storage adapter takes - totalWriteLatency - - - - Storage adapter - storageAdapter - - - - Millisecond - millisecond - - average - absolute - 2 - 2 - - - 168 - - - Highest latency value across all storage adapters used by the host - maxTotalLatency - - - - Storage adapter - storageAdapter - - - - Millisecond - millisecond - - latest - absolute - 3 - 3 - - - 169 - - - Average amount of time for an I/O operation to complete successfully - throughput.cont - - - - Storage adapter - storageAdapter - - - - Millisecond - millisecond - - average - absolute - 4 - 4 - - - 170 - - - The percent of I/Os that have been issued but have not yet completed - OIOsPct - - - - Storage adapter - storageAdapter - - - - Percentage - percent - - average - absolute - 4 - 4 - - - 171 - - - Average number of read commands issued per second to the virtual disk during the collection interval - numberReadAveraged - - - - Virtual disk - virtualDisk - - - - Number - number - - average - rate - 1 - 3 - - - 172 - - - Average number of write commands issued per second to the virtual disk during the collection interval - numberWriteAveraged - - - - Virtual disk - virtualDisk - - - - Number - number - - average - rate - 1 - 3 - - - 173 - - - Rate of reading data from the virtual disk - read - - - - Virtual disk - virtualDisk - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 2 - - - 174 - - - Rate of writing data to the virtual disk - write - - - - Virtual disk - virtualDisk - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 2 - - - 175 - - - The average time a read from the virtual disk takes - totalReadLatency - - - - Virtual disk - virtualDisk - - - - Millisecond - millisecond - - average - absolute - 1 - 3 - - - 176 - - - The average time a write to the virtual disk takes - totalWriteLatency - - - - Virtual disk - virtualDisk - - - - Millisecond - millisecond - - average - absolute - 1 - 3 - - - 177 - - - Average amount of time for an I/O operation to complete successfully - throughput.cont - - - - Virtual disk - virtualDisk - - - - Millisecond - millisecond - - average - absolute - 4 - 4 - - - 178 - - - Average number of read commands issued per second to the datastore during the collection interval - numberReadAveraged - - - - Datastore - datastore - - - - Number - number - - average - rate - 1 - 3 - - - 179 - - - Average number of write commands issued per second to the datastore during the collection interval - numberWriteAveraged - - - - Datastore - datastore - - - - Number - number - - average - rate - 1 - 3 - - - 180 - - - Rate of reading data from the datastore - read - - - - Datastore - datastore - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 2 - - - 181 - - - Rate of writing data to the datastore - write - - - - Datastore - datastore - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 2 - 2 - - - 182 - - - The average time a read from the datastore takes - totalReadLatency - - - - Datastore - datastore - - - - Millisecond - millisecond - - average - absolute - 1 - 3 - - - 183 - - - The average time a write to the datastore takes - totalWriteLatency - - - - Datastore - 
datastore - - - - Millisecond - millisecond - - average - absolute - 1 - 3 - - - 184 - - - Highest latency value across all datastores used by the host - maxTotalLatency - - - - Datastore - datastore - - - - Millisecond - millisecond - - latest - absolute - 3 - 3 - - - 185 - - - Storage I/O Control aggregated IOPS - datastoreIops - - - - Datastore - datastore - - - - Number - number - - average - absolute - 1 - 3 - - - 186 - - - Storage I/O Control size-normalized I/O latency - sizeNormalizedDatastoreLatency - - - - Datastore - datastore - - - - Microsecond - microsecond - - average - absolute - 1 - 3 - - - 187 - - - usage - throughput.usage - - - - Datastore - datastore - - - - Kilobytes per second - kiloBytesPerSecond - - average - absolute - 4 - 4 - - - 188 - - - contention - throughput.contention - - - - Datastore - datastore - - - - Millisecond - millisecond - - average - absolute - 4 - 4 - - - 189 - - - busResets - busResets - - - - Datastore - datastore - - - - Number - number - - summation - delta - 2 - 2 - - - 190 - - - commandsAborted - commandsAborted - - - - Datastore - datastore - - - - Number - number - - summation - delta - 2 - 2 - - - 191 - - - Percentage of time Storage I/O Control actively controlled datastore latency - siocActiveTimePercentage - - - - Datastore - datastore - - - - Percentage - percent - - average - absolute - 1 - 3 - - - 192 - - - Average amount of time for an I/O operation to complete successfully - throughput.cont - - - - Storage path - storagePath - - - - Millisecond - millisecond - - average - absolute - 4 - 4 - - - 193 - - - Highest latency value across all storage paths used by the host - maxTotalLatency - - - - Storage path - storagePath - - - - Millisecond - millisecond - - latest - absolute - 3 - 3 - - - 194 - - - Virtual disk I/O rate - throughput.usage - - - - Virtual disk - virtualDisk - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 4 - 4 - - - 195 - - - Number of terminations to a virtual disk - commandsAborted - - - - Virtual disk - virtualDisk - - - - Number - number - - summation - delta - 2 - 4 - - - 196 - - - Number of resets to a virtual disk - busResets - - - - Virtual disk - virtualDisk - - - - Number - number - - summation - delta - 2 - 4 - - - 197 - - - The number of I/Os that have been issued but have not yet completed - outstandingIOs - - - - Storage adapter - storageAdapter - - - - Number - number - - average - absolute - 2 - 2 - - - 198 - - - The current number of I/Os that are waiting to be issued - queued - - - - Storage adapter - storageAdapter - - - - Number - number - - average - absolute - 2 - 2 - - - 199 - - - The maximum number of I/Os that can be outstanding at a given time - queueDepth - - - - Storage adapter - storageAdapter - - - - Number - number - - average - absolute - 2 - 2 - - - 200 - - - Average amount of time spent in the VMkernel queue, per SCSI command, during the collection interval - queueLatency - - - - Storage adapter - storageAdapter - - - - Millisecond - millisecond - - average - absolute - 2 - 2 - - - 201 - - - The storage adapter's I/O rate - throughput.usag - - - - Storage adapter - storageAdapter - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 4 - 4 - - - 202 - - - Number of SCSI-bus reset commands issued during the collection interval - busResets - - - - Storage path - storagePath - - - - Number - number - - summation - delta - 2 - 3 - - - 203 - - - Number of SCSI commands terminated during the collection interval - commandsAborted - - - - Storage 
path - storagePath - - - - Number - number - - summation - delta - 2 - 3 - - - 204 - - - Storage path I/O rate - throughput.usage - - - - Storage path - storagePath - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 4 - 4 - - - 205 - - - Average pNic I/O rate for VMs - throughput.usage.vm - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 3 - 3 - - - 206 - - - Average pNic I/O rate for NFS - throughput.usage.nfs - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 3 - 3 - - - 207 - - - Average pNic I/O rate for vMotion - throughput.usage.vmotion - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 3 - 3 - - - 208 - - - Average pNic I/O rate for FT - throughput.usage.ft - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 3 - 3 - - - 209 - - - Average pNic I/O rate for iSCSI - throughput.usage.iscsi - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 3 - 3 - - - 210 - - - Average pNic I/O rate for HBR - throughput.usage.hbr - - - - Network - net - - - - Kilobytes per second - kiloBytesPerSecond - - average - rate - 3 - 3 - - - 211 - - - Current maximum allowed power usage - capacity.usable - - - - Power - power - - - - Watt - watt - - average - absolute - 4 - 4 - - - 212 - - - Current power usage - capacity.usage - - - - Power - power - - - - Watt - watt - - average - absolute - 4 - 4 - - - 213 - - - Amount of CPU resources allocated to the virtual machine or resource pool, based on the total cluster capacity and the resource configuration of the resource hierarchy - cpuentitlement - - - - CPU - cpu - - - - Megahertz - megaHertz - - latest - absolute - 2 - 3 - - - 214 - - - Memory allocation as calculated by the VMkernel scheduler based on current estimated demand and reservation, limit, and shares policies set for all virtual machines and resource pools in the host or cluster - mementitlement - - - - Memory - mem - - - - Megabyte - megaBytes - - latest - absolute - 2 - 3 - - - 215 - - - DRS score of the virtual machine - vmDrsScore - - - - Cluster services - clusterServices - - - - Percentage - percent - - latest - absolute - 1 - 1 - - - 216 - - - Fairness of distributed CPU resource allocation - cpufairness - - - - Cluster services - clusterServices - - - - Number - number - - latest - absolute - 1 - 3 - - - 217 - - - Aggregate available memory resources of all the hosts within a cluster - memfairness - - - - Cluster services - clusterServices - - - - Number - number - - latest - absolute - 1 - 3 - - - 218 - - - The rate of transmitted packets for this VDS - throughput.pktsTx - - - - Network - net - - - - Number - number - - average - absolute - 3 - 3 - - - 219 - - - The rate of transmitted Multicast packets for this VDS - throughput.pktsTxMulticast - - - - Network - net - - - - Number - number - - average - absolute - 3 - 3 - - - 220 - - - The rate of transmitted Broadcast packets for this VDS - throughput.pktsTxBroadcast - - - - Network - net - - - - Number - number - - average - absolute - 3 - 3 - - - 221 - - - The rate of received packets for this vDS - throughput.pktsRx - - - - Network - net - - - - Number - number - - average - absolute - 3 - 3 - - - 222 - - - The rate of received Multicast packets for this VDS - throughput.pktsRxMulticast - - - - Network - net - - - - Number - number - - average - absolute - 3 - 3 - - - 223 - - - The rate of received Broadcast 
[perf-manager.xml, removed hunk: continuation of the old mocked PerfManager perfCounter catalog, roughly counters 224 through 640. Each PerfCounterInfo record carries a numeric key, a description, a counter key (for example throughput.vds.pktsTx, numVMotion, capacity.usage, swapin, maxQueueDepth, datastoreVMObservedLatency), a group key (net, vmop, clusterServices, cpu, mem, disk, datastore, virtualDisk, storageAdapter, sys, rescpu, managementAgent, storagePath, hbr, vflashModule, vsanDomObj, gpu, pmem, vcDebugInfo, vcResources), a unit key (number, percent, megaHertz, kiloBytes, megaBytes, teraBytes, millisecond, microsecond, second, kiloBytesPerSecond, megaBytesPerSecond, celsius), a rollup type (none, average, latest, minimum, maximum, summation), a stats type (absolute, rate, delta), and the collection level and per-device level.]
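For reference, each of those records corresponds to a govmomi `types.PerfCounterInfo` value once the mock response is deserialized. Below is a minimal, illustrative Go sketch of counter 226 (net / throughput.vds.pktsTx) from the catalog above; the `Label` strings simply mirror the `Summary` text, which is an assumption since the mock's short labels are not recoverable here.

```go
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Counter 226 from the mocked catalog: group "net", key "throughput.vds.pktsTx",
	// an averaged absolute value collected at level 3 (per-device level 3).
	counter := types.PerfCounterInfo{
		Key: 226,
		NameInfo: &types.ElementDescription{
			Description: types.Description{
				// The summary is taken from the catalog; the short label is
				// assumed to mirror it for this illustration.
				Label:   "The rate of transmitted packets for this DVPort",
				Summary: "The rate of transmitted packets for this DVPort",
			},
			Key: "throughput.vds.pktsTx",
		},
		GroupInfo: &types.ElementDescription{
			Description: types.Description{Label: "Network", Summary: "Network"},
			Key:         "net",
		},
		UnitInfo: &types.ElementDescription{
			Description: types.Description{Label: "Number", Summary: "Number"},
			Key:         "number",
		},
		RollupType:     types.PerfSummaryTypeAverage,
		StatsType:      types.PerfStatsTypeAbsolute,
		Level:          3,
		PerDeviceLevel: 3,
	}

	// The dotted counter name used when selecting metrics is group.key.rollup.
	fmt.Printf("%s.%s.%s\n",
		counter.GroupInfo.GetElementDescription().Key,
		counter.NameInfo.GetElementDescription().Key,
		counter.RollupType) // prints: net.throughput.vds.pktsTx.average
}
```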
- drsmanaged.reservation - - - - PMEM - pmem - - - - Megabyte - megaBytes - - latest - absolute - 4 - 4 - - - 701 - - - Memory reservation health state, 2->Red, 1->Green - health.reservationState - - - - Memory - mem - - - - Number - number - - latest - absolute - 4 - 4 - - - + + PerfMgr + + perfCounter + + + 1 + + + CPU usage as a percentage during the interval + usage + + + + CPU + cpu + + + + Percentage + percent + + none + rate + 4 + 4 + + + 2 + + + CPU usage as a percentage during the interval + usage + + + + CPU + cpu + + + + Percentage + percent + + average + rate + 1 + 3 + + + 3 + + + CPU usage as a percentage during the interval + usage + + + + CPU + cpu + + + + Percentage + percent + + minimum + rate + 4 + 4 + + + 4 + + + CPU usage as a percentage during the interval + usage + + + + CPU + cpu + + + + Percentage + percent + + maximum + rate + 4 + 4 + + + 5 + + + CPU usage in megahertz during the interval + usagemhz + + + + CPU + cpu + + + + Megahertz + megaHertz + + none + rate + 4 + 4 + + + 6 + + + CPU usage in megahertz during the interval + usagemhz + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + rate + 1 + 3 + + + 7 + + + CPU usage in megahertz during the interval + usagemhz + + + + CPU + cpu + + + + Megahertz + megaHertz + + minimum + rate + 4 + 4 + + + 8 + + + CPU usage in megahertz during the interval + usagemhz + + + + CPU + cpu + + + + Megahertz + megaHertz + + maximum + rate + 4 + 4 + + + 9 + + + Total CPU capacity reserved by virtual machines + reservedCapacity + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + absolute + 2 + 3 + + + 10 + + + Amount of time spent on system processes on each virtual CPU in the virtual machine + system + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 3 + 3 + + + 11 + + + Total CPU time spent in wait state + wait + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 3 + 3 + + + 12 + + + Time that the virtual machine was ready, but could not get scheduled to run on the physical CPU during last measurement interval + ready + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 1 + 3 + + + 13 + + + Total time that the CPU spent in an idle state + idle + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 2 + 3 + + + 14 + + + Total CPU usage + used + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 3 + 3 + + + 15 + + + Capacity in MHz of the physical CPU cores + capacity.provisioned + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + absolute + 4 + 4 + + + 16 + + + CPU resources devoted by the ESXi scheduler to the virtual machines and resource pools + capacity.entitlement + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + absolute + 4 + 4 + + + 17 + + + CPU usage as a percent during the interval + capacity.usage + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + rate + 4 + 4 + + + 18 + + + The amount of CPU resources a VM would use if there were no CPU contention + capacity.demand + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + absolute + 4 + 4 + + + 19 + + + Percent of time the VM is unable to run because it is contending for access to the physical CPU(s) + capacity.contention + + + + CPU + cpu + + + + Percentage + percent + + average + rate + 4 + 4 + + + 20 + + + The number of virtual processors provisioned to the entity + corecount.provisioned + + + + CPU + cpu + + + + Number + number + + average + absolute + 4 + 4 + + + 21 + + + The number of virtual 
processors running on the host + corecount.usage + + + + CPU + cpu + + + + Number + number + + average + absolute + 4 + 4 + + + 22 + + + Time the VM vCPU is ready to run, but is unable to run due to co-scheduling constraints + corecount.contention + + + + CPU + cpu + + + + Percentage + percent + + average + rate + 4 + 4 + + + 23 + + + Percentage of host physical memory that has been consumed + usage + + + + Memory + mem + + + + Percentage + percent + + none + absolute + 4 + 4 + + + 24 + + + Percentage of host physical memory that has been consumed + usage + + + + Memory + mem + + + + Percentage + percent + + average + absolute + 1 + 3 + + + 25 + + + Percentage of host physical memory that has been consumed + usage + + + + Memory + mem + + + + Percentage + percent + + minimum + absolute + 4 + 4 + + + 26 + + + Percentage of host physical memory that has been consumed + usage + + + + Memory + mem + + + + Percentage + percent + + maximum + absolute + 4 + 4 + + + 27 + + + Memory reservation consumed by powered-on virtual machines + reservedCapacity + + + + Memory + mem + + + + Megabyte + megaBytes + + average + absolute + 2 + 3 + + + 28 + + + Amount of host physical memory or physical memory that is mapped for a virtual machine or a host + granted + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 29 + + + Amount of host physical memory or physical memory that is mapped for a virtual machine or a host + granted + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 30 + + + Amount of host physical memory or physical memory that is mapped for a virtual machine or a host + granted + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 31 + + + Amount of host physical memory or physical memory that is mapped for a virtual machine or a host + granted + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 32 + + + Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi + active + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 33 + + + Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi + active + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 34 + + + Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi + active + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 35 + + + Amount of guest physical memory that is being actively read or written by guest. 
Activeness is estimated by ESXi + active + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 36 + + + Amount of guest physical memory that is shared within a single virtual machine or across virtual machines + shared + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 37 + + + Amount of guest physical memory that is shared within a single virtual machine or across virtual machines + shared + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 38 + + + Amount of guest physical memory that is shared within a single virtual machine or across virtual machines + shared + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 39 + + + Amount of guest physical memory that is shared within a single virtual machine or across virtual machines + shared + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 40 + + + Guest physical memory pages whose content is 0x00 + zero + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 41 + + + Guest physical memory pages whose content is 0x00 + zero + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 42 + + + Guest physical memory pages whose content is 0x00 + zero + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 43 + + + Guest physical memory pages whose content is 0x00 + zero + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 44 + + + Amount by which reservation can be raised + unreserved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 45 + + + Amount by which reservation can be raised + unreserved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 46 + + + Amount by which reservation can be raised + unreserved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 47 + + + Amount by which reservation can be raised + unreserved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 48 + + + Swap storage space consumed + swapused + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 49 + + + Swap storage space consumed + swapused + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 50 + + + Swap storage space consumed + swapused + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 51 + + + Swap storage space consumed + swapused + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 52 + + + swapunreserved + swapunreserved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 53 + + + swapunreserved + swapunreserved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 54 + + + swapunreserved + swapunreserved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 55 + + + swapunreserved + swapunreserved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 56 + + + Amount of host physical memory that backs shared guest physical memory (Shared) + sharedcommon + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 57 + + + Amount of host physical memory that backs shared guest physical memory (Shared) + sharedcommon + + + + Memory + mem + + + + Kilobyte 
+ kiloBytes + + average + absolute + 2 + 3 + + + 58 + + + Amount of host physical memory that backs shared guest physical memory (Shared) + sharedcommon + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 59 + + + Amount of host physical memory that backs shared guest physical memory (Shared) + sharedcommon + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 60 + + + Virtual address space of ESXi that is dedicated to its heap + heap + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 61 + + + Virtual address space of ESXi that is dedicated to its heap + heap + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 62 + + + Virtual address space of ESXi that is dedicated to its heap + heap + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 63 + + + Virtual address space of ESXi that is dedicated to its heap + heap + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 64 + + + Free address space in the heap of ESXi. This is less than or equal to Heap + heapfree + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 65 + + + Free address space in the heap of ESXi. This is less than or equal to Heap + heapfree + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 66 + + + Free address space in the heap of ESXi. This is less than or equal to Heap + heapfree + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 67 + + + Free address space in the heap of ESXi. This is less than or equal to Heap + heapfree + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 68 + + + Current memory availability state of ESXi. Possible values are high, clear, soft, hard, low. 
The state value determines the techniques used for memory reclamation from virtual machines + state + + + + Memory + mem + + + + Number + number + + latest + absolute + 2 + 3 + + + 69 + + + Amount of guest physical memory that is swapped out to the swap space + swapped + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 70 + + + Amount of guest physical memory that is swapped out to the swap space + swapped + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 71 + + + Amount of guest physical memory that is swapped out to the swap space + swapped + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 72 + + + Amount of guest physical memory that is swapped out to the swap space + swapped + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 73 + + + Amount of memory that ESXi needs to reclaim by swapping + swaptarget + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 74 + + + Amount of memory that ESXi needs to reclaim by swapping + swaptarget + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 75 + + + Amount of memory that ESXi needs to reclaim by swapping + swaptarget + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 76 + + + Amount of memory that ESXi needs to reclaim by swapping + swaptarget + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 77 + + + swapIn + swapIn + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 78 + + + swapIn + swapIn + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 79 + + + swapIn + swapIn + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 80 + + + swapIn + swapIn + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 81 + + + swapOut + swapOut + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 82 + + + swapOut + swapOut + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 83 + + + swapOut + swapOut + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 84 + + + swapOut + swapOut + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 85 + + + Rate at which guest physical memory is swapped in from the swap space + swapinRate + + + + Memory + mem + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 1 + 3 + + + 86 + + + Rate at which guest physical memory is swapped out to the swap space + swapoutRate + + + + Memory + mem + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 1 + 3 + + + 87 + + + Amount of memory that is swapped out for the Service Console + swapOut + + + + Management agent + managementAgent + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 88 + + + Amount of memory that is swapped in for the Service Console + swapIn + + + + Management agent + managementAgent + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 89 + + + Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest + vmmemctl + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 90 + + + Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the 
guest + vmmemctl + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 1 + 3 + + + 91 + + + Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest + vmmemctl + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 92 + + + Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest + vmmemctl + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 93 + + + Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi + vmmemctltarget + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 94 + + + Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi + vmmemctltarget + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 95 + + + Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi + vmmemctltarget + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 96 + + + Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi + vmmemctltarget + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 97 + + + Amount of host physical memory consumed for backing up guest physical memory pages + consumed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 98 + + + Amount of host physical memory consumed for backing up guest physical memory pages + consumed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 1 + 3 + + + 99 + + + Amount of host physical memory consumed for backing up guest physical memory pages + consumed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 100 + + + Amount of host physical memory consumed for backing up guest physical memory pages + consumed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 101 + + + Host physical memory consumed by ESXi data structures for running the virtual machines + overhead + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 102 + + + Host physical memory consumed by ESXi data structures for running the virtual machines + overhead + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 1 + 1 + + + 103 + + + Host physical memory consumed by ESXi data structures for running the virtual machines + overhead + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 104 + + + Host physical memory consumed by ESXi data structures for running the virtual machines + overhead + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 105 + + + Guest physical memory pages that have undergone memory compression + compressed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 106 + + + Rate of guest physical memory page compression by ESXi + compressionRate + + + + Memory + mem + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 107 + + + Rate of guest physical memory decompression + decompressionRate + + + + Memory + mem + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 108 + + + Total amount of memory available to the host + capacity.provisioned + + + + Memory + mem + + + + 
Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 109 + + + Amount of host physical memory the VM is entitled to, as determined by the ESXi scheduler + capacity.entitlement + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 110 + + + Amount of physical memory available for use by virtual machines on this host + capacity.usable + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 111 + + + Amount of physical memory actively used + capacity.usage + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 112 + + + Percentage of time VMs are waiting to access swapped, compressed or ballooned memory + capacity.contention + + + + Memory + mem + + + + Percentage + percent + + average + rate + 4 + 4 + + + 113 + + + vm + capacity.usage.vm + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 114 + + + vmOvrhd + capacity.usage.vmOvrhd + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 115 + + + vmkOvrhd + capacity.usage.vmkOvrhd + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 116 + + + userworld + capacity.usage.userworld + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 117 + + + vm + reservedCapacity.vm + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 118 + + + vmOvhd + reservedCapacity.vmOvhd + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 119 + + + vmkOvrhd + reservedCapacity.vmkOvrhd + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 120 + + + userworld + reservedCapacity.userworld + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 121 + + + Percent of memory that has been reserved either through VMkernel use, by userworlds or due to VM memory reservations + reservedCapacityPct + + + + Memory + mem + + + + Percentage + percent + + average + absolute + 4 + 4 + + + 122 + + + Amount of physical memory consumed by VMs on this host + consumed.vms + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 123 + + + Amount of physical memory consumed by userworlds on this host + consumed.userworlds + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 4 + + + 124 + + + Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval. + usage + + + + Disk + disk + + + + Kilobytes per second + kiloBytesPerSecond + + none + rate + 4 + 4 + + + 125 + + + Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval. + usage + + + + Disk + disk + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 1 + 3 + + + 126 + + + Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval. + usage + + + + Disk + disk + + + + Kilobytes per second + kiloBytesPerSecond + + minimum + rate + 4 + 4 + + + 127 + + + Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval. 
+ usage + + + + Disk + disk + + + + Kilobytes per second + kiloBytesPerSecond + + maximum + rate + 4 + 4 + + + 128 + + + Number of disk reads during the collection interval + numberRead + + + + Disk + disk + + + + Number + number + + summation + delta + 3 + 3 + + + 129 + + + Number of disk writes during the collection interval + numberWrite + + + + Disk + disk + + + + Number + number + + summation + delta + 3 + 3 + + + 130 + + + Average number of kilobytes read from the disk each second during the collection interval + read + + + + Disk + disk + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 131 + + + Average number of kilobytes written to disk each second during the collection interval + write + + + + Disk + disk + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 132 + + + Average amount of time taken during the collection interval to process a SCSI command issued by the guest OS to the virtual machine + totalLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 3 + 3 + + + 133 + + + Highest latency value across all disks used by the host + maxTotalLatency + + + + Disk + disk + + + + Millisecond + millisecond + + latest + absolute + 1 + 3 + + + 134 + + + Number of SCSI commands aborted during the collection interval + commandsAborted + + + + Disk + disk + + + + Number + number + + summation + delta + 2 + 3 + + + 135 + + + Number of SCSI-bus reset commands issued during the collection interval + busResets + + + + Disk + disk + + + + Number + number + + summation + delta + 2 + 3 + + + 136 + + + Average number of disk reads per second during the collection interval + numberReadAveraged + + + + Disk + disk + + + + Number + number + + average + rate + 1 + 3 + + + 137 + + + Average number of disk writes per second during the collection interval + numberWriteAveraged + + + + Disk + disk + + + + Number + number + + average + rate + 1 + 3 + + + 138 + + + Aggregated disk I/O rate, including the rates for all virtual machines running on the host during the collection interval + throughput.usage + + + + Disk + disk + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 139 + + + Average amount of time for an I/O operation to complete successfully + throughput.contention + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 4 + 4 + + + 140 + + + Number of SCSI reservation conflicts for the LUN during the collection interval + scsiReservationConflicts + + + + Disk + disk + + + + Number + number + + summation + delta + 2 + 2 + + + 141 + + + Number of SCSI reservation conflicts for the LUN as a percent of total commands during the collection interval + scsiReservationCnflctsPct + + + + Disk + disk + + + + Percentage + percent + + average + absolute + 4 + 4 + + + 142 + + + Network utilization (combined transmit-rates and receive-rates) during the interval + usage + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + none + rate + 4 + 4 + + + 143 + + + Network utilization (combined transmit-rates and receive-rates) during the interval + usage + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 1 + 3 + + + 144 + + + Network utilization (combined transmit-rates and receive-rates) during the interval + usage + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + minimum + rate + 4 + 4 + + + 145 + + + Network utilization (combined transmit-rates and receive-rates) during the 
interval + usage + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + maximum + rate + 4 + 4 + + + 146 + + + Number of packets received during the interval + packetsRx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 147 + + + Number of packets transmitted during the interval + packetsTx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 148 + + + Average rate at which data was received during the interval + received + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 149 + + + Average rate at which data was transmitted during the interval + transmitted + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 150 + + + The maximum network bandwidth for the host + throughput.provisioned + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + absolute + 4 + 4 + + + 151 + + + The current available network bandwidth for the host + throughput.usable + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + absolute + 4 + 4 + + + 152 + + + The current network bandwidth usage for the host + throughput.usage + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 153 + + + The aggregate network droppped packets for the host + throughput.contention + + + + Network + net + + + + Number + number + + summation + delta + 4 + 4 + + + 154 + + + Average rate of packets received and transmitted per second + throughput.packetsPerSec + + + + Network + net + + + + Number + number + + average + rate + 4 + 4 + + + 155 + + + Total time elapsed, in seconds, since last system startup + uptime + + + + System + sys + + + + Second + second + + latest + absolute + 1 + 3 + + + 156 + + + Number of heartbeats issued per virtual machine during the interval + heartbeat + + + + System + sys + + + + Number + number + + summation + delta + 1 + 3 + + + 157 + + + Current power usage + power + + + + Power + power + + + + Watt + watt + + average + rate + 2 + 3 + + + 158 + + + Maximum allowed power usage + powerCap + + + + Power + power + + + + Watt + watt + + average + absolute + 3 + 3 + + + 159 + + + Total energy used since last stats reset + energy + + + + Power + power + + + + Joule + joule + + summation + delta + 3 + 3 + + + 160 + + + Current power usage as a percentage of maximum allowed power + capacity.usagePct + + + + Power + power + + + + Percentage + percent + + average + absolute + 4 + 4 + + + 161 + + + Average number of commands issued per second by the storage adapter during the collection interval + commandsAveraged + + + + Storage adapter + storageAdapter + + + + Number + number + + average + rate + 2 + 2 + + + 162 + + + Average number of read commands issued per second by the storage adapter during the collection interval + numberReadAveraged + + + + Storage adapter + storageAdapter + + + + Number + number + + average + rate + 2 + 2 + + + 163 + + + Average number of write commands issued per second by the storage adapter during the collection interval + numberWriteAveraged + + + + Storage adapter + storageAdapter + + + + Number + number + + average + rate + 2 + 2 + + + 164 + + + Rate of reading data by the storage adapter + read + + + + Storage adapter + storageAdapter + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 2 + + + 165 + + + Rate of writing data by the storage adapter + write + + + + 
Storage adapter + storageAdapter + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 2 + + + 166 + + + The average time a read by the storage adapter takes + totalReadLatency + + + + Storage adapter + storageAdapter + + + + Millisecond + millisecond + + average + absolute + 2 + 2 + + + 167 + + + The average time a write by the storage adapter takes + totalWriteLatency + + + + Storage adapter + storageAdapter + + + + Millisecond + millisecond + + average + absolute + 2 + 2 + + + 168 + + + Highest latency value across all storage adapters used by the host + maxTotalLatency + + + + Storage adapter + storageAdapter + + + + Millisecond + millisecond + + latest + absolute + 3 + 3 + + + 169 + + + Average amount of time for an I/O operation to complete successfully + throughput.cont + + + + Storage adapter + storageAdapter + + + + Millisecond + millisecond + + average + absolute + 4 + 4 + + + 170 + + + The percent of I/Os that have been issued but have not yet completed + OIOsPct + + + + Storage adapter + storageAdapter + + + + Percentage + percent + + average + absolute + 4 + 4 + + + 171 + + + Average number of read commands issued per second to the virtual disk during the collection interval + numberReadAveraged + + + + Virtual disk + virtualDisk + + + + Number + number + + average + rate + 1 + 3 + + + 172 + + + Average number of write commands issued per second to the virtual disk during the collection interval + numberWriteAveraged + + + + Virtual disk + virtualDisk + + + + Number + number + + average + rate + 1 + 3 + + + 173 + + + Rate of reading data from the virtual disk + read + + + + Virtual disk + virtualDisk + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 2 + + + 174 + + + Rate of writing data to the virtual disk + write + + + + Virtual disk + virtualDisk + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 2 + + + 175 + + + The average time a read from the virtual disk takes + totalReadLatency + + + + Virtual disk + virtualDisk + + + + Millisecond + millisecond + + average + absolute + 1 + 3 + + + 176 + + + The average time a write to the virtual disk takes + totalWriteLatency + + + + Virtual disk + virtualDisk + + + + Millisecond + millisecond + + average + absolute + 1 + 3 + + + 177 + + + Average amount of time for an I/O operation to complete successfully + throughput.cont + + + + Virtual disk + virtualDisk + + + + Millisecond + millisecond + + average + absolute + 4 + 4 + + + 178 + + + Average number of read commands issued per second to the datastore during the collection interval + numberReadAveraged + + + + Datastore + datastore + + + + Number + number + + average + rate + 1 + 3 + + + 179 + + + Average number of write commands issued per second to the datastore during the collection interval + numberWriteAveraged + + + + Datastore + datastore + + + + Number + number + + average + rate + 1 + 3 + + + 180 + + + Rate of reading data from the datastore + read + + + + Datastore + datastore + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 2 + + + 181 + + + Rate of writing data to the datastore + write + + + + Datastore + datastore + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 2 + + + 182 + + + The average time a read from the datastore takes + totalReadLatency + + + + Datastore + datastore + + + + Millisecond + millisecond + + average + absolute + 1 + 3 + + + 183 + + + The average time a write to the datastore takes + totalWriteLatency + + + + Datastore + 
datastore + + + + Millisecond + millisecond + + average + absolute + 1 + 3 + + + 184 + + + Highest latency value across all datastores used by the host + maxTotalLatency + + + + Datastore + datastore + + + + Millisecond + millisecond + + latest + absolute + 3 + 3 + + + 185 + + + Storage I/O Control aggregated IOPS + datastoreIops + + + + Datastore + datastore + + + + Number + number + + average + absolute + 1 + 3 + + + 186 + + + Storage I/O Control size-normalized I/O latency + sizeNormalizedDatastoreLatency + + + + Datastore + datastore + + + + Microsecond + microsecond + + average + absolute + 1 + 3 + + + 187 + + + usage + throughput.usage + + + + Datastore + datastore + + + + Kilobytes per second + kiloBytesPerSecond + + average + absolute + 4 + 4 + + + 188 + + + contention + throughput.contention + + + + Datastore + datastore + + + + Millisecond + millisecond + + average + absolute + 4 + 4 + + + 189 + + + busResets + busResets + + + + Datastore + datastore + + + + Number + number + + summation + delta + 2 + 2 + + + 190 + + + commandsAborted + commandsAborted + + + + Datastore + datastore + + + + Number + number + + summation + delta + 2 + 2 + + + 191 + + + Percentage of time Storage I/O Control actively controlled datastore latency + siocActiveTimePercentage + + + + Datastore + datastore + + + + Percentage + percent + + average + absolute + 1 + 3 + + + 192 + + + Average amount of time for an I/O operation to complete successfully + throughput.cont + + + + Storage path + storagePath + + + + Millisecond + millisecond + + average + absolute + 4 + 4 + + + 193 + + + Highest latency value across all storage paths used by the host + maxTotalLatency + + + + Storage path + storagePath + + + + Millisecond + millisecond + + latest + absolute + 3 + 3 + + + 194 + + + Virtual disk I/O rate + throughput.usage + + + + Virtual disk + virtualDisk + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 195 + + + Number of terminations to a virtual disk + commandsAborted + + + + Virtual disk + virtualDisk + + + + Number + number + + summation + delta + 2 + 4 + + + 196 + + + Number of resets to a virtual disk + busResets + + + + Virtual disk + virtualDisk + + + + Number + number + + summation + delta + 2 + 4 + + + 197 + + + The number of I/Os that have been issued but have not yet completed + outstandingIOs + + + + Storage adapter + storageAdapter + + + + Number + number + + average + absolute + 2 + 2 + + + 198 + + + The current number of I/Os that are waiting to be issued + queued + + + + Storage adapter + storageAdapter + + + + Number + number + + average + absolute + 2 + 2 + + + 199 + + + The maximum number of I/Os that can be outstanding at a given time + queueDepth + + + + Storage adapter + storageAdapter + + + + Number + number + + average + absolute + 2 + 2 + + + 200 + + + Average amount of time spent in the VMkernel queue, per SCSI command, during the collection interval + queueLatency + + + + Storage adapter + storageAdapter + + + + Millisecond + millisecond + + average + absolute + 2 + 2 + + + 201 + + + The storage adapter's I/O rate + throughput.usag + + + + Storage adapter + storageAdapter + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 202 + + + Number of SCSI-bus reset commands issued during the collection interval + busResets + + + + Storage path + storagePath + + + + Number + number + + summation + delta + 2 + 3 + + + 203 + + + Number of SCSI commands terminated during the collection interval + commandsAborted + + + + Storage 
path + storagePath + + + + Number + number + + summation + delta + 2 + 3 + + + 204 + + + Storage path I/O rate + throughput.usage + + + + Storage path + storagePath + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 205 + + + Average pNic I/O rate for VMs + throughput.usage.vm + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 206 + + + Average pNic I/O rate for NFS + throughput.usage.nfs + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 207 + + + Average pNic I/O rate for vMotion + throughput.usage.vmotion + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 208 + + + Average pNic I/O rate for FT + throughput.usage.ft + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 209 + + + Average pNic I/O rate for iSCSI + throughput.usage.iscsi + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 210 + + + Average pNic I/O rate for HBR + throughput.usage.hbr + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 211 + + + Current maximum allowed power usage + capacity.usable + + + + Power + power + + + + Watt + watt + + average + absolute + 4 + 4 + + + 212 + + + Current power usage + capacity.usage + + + + Power + power + + + + Watt + watt + + average + absolute + 4 + 4 + + + 213 + + + Amount of CPU resources allocated to the virtual machine or resource pool, based on the total cluster capacity and the resource configuration of the resource hierarchy + cpuentitlement + + + + CPU + cpu + + + + Megahertz + megaHertz + + latest + absolute + 2 + 3 + + + 214 + + + Memory allocation as calculated by the VMkernel scheduler based on current estimated demand and reservation, limit, and shares policies set for all virtual machines and resource pools in the host or cluster + mementitlement + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 2 + 3 + + + 215 + + + DRS score of the virtual machine + vmDrsScore + + + + Cluster services + clusterServices + + + + Percentage + percent + + latest + absolute + 1 + 1 + + + 216 + + + Fairness of distributed CPU resource allocation + cpufairness + + + + Cluster services + clusterServices + + + + Number + number + + latest + absolute + 1 + 3 + + + 217 + + + Aggregate available memory resources of all the hosts within a cluster + memfairness + + + + Cluster services + clusterServices + + + + Number + number + + latest + absolute + 1 + 3 + + + 218 + + + The rate of transmitted packets for this VDS + throughput.pktsTx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 219 + + + The rate of transmitted Multicast packets for this VDS + throughput.pktsTxMulticast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 220 + + + The rate of transmitted Broadcast packets for this VDS + throughput.pktsTxBroadcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 221 + + + The rate of received packets for this vDS + throughput.pktsRx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 222 + + + The rate of received Multicast packets for this VDS + throughput.pktsRxMulticast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 223 + + + The rate of received Broadcast 
packets for this VDS + throughput.pktsRxBroadcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 224 + + + Count of dropped transmitted packets for this VDS + throughput.droppedTx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 225 + + + Count of dropped received packets for this VDS + throughput.droppedRx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 226 + + + The rate of transmitted packets for this DVPort + throughput.vds.pktsTx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 227 + + + The rate of transmitted multicast packets for this DVPort + throughput.vds.pktsTxMcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 228 + + + The rate of transmitted broadcast packets for this DVPort + throughput.vds.pktsTxBcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 229 + + + The rate of received packets for this DVPort + throughput.vds.pktsRx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 230 + + + The rate of received multicast packets for this DVPort + throughput.vds.pktsRxMcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 231 + + + The rate of received broadcast packets for this DVPort + throughput.vds.pktsRxBcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 232 + + + Count of dropped transmitted packets for this DVPort + throughput.vds.droppedTx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 233 + + + Count of dropped received packets for this DVPort + throughput.vds.droppedRx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 234 + + + The rate of transmitted packets for this LAG + throughput.vds.lagTx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 235 + + + The rate of transmitted Multicast packets for this LAG + throughput.vds.lagTxMcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 236 + + + The rate of transmitted Broadcast packets for this LAG + throughput.vds.lagTxBcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 237 + + + The rate of received packets for this LAG + throughput.vds.lagRx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 238 + + + The rate of received multicast packets for this LAG + throughput.vds.lagRxMcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 239 + + + The rate of received Broadcast packets for this LAG + throughput.vds.lagRxBcast + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 240 + + + Count of dropped transmitted packets for this LAG + throughput.vds.lagDropTx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 241 + + + Count of dropped received packets for this LAG + throughput.vds.lagDropRx + + + + Network + net + + + + Number + number + + average + absolute + 3 + 3 + + + 242 + + + Number of virtual machine power on operations + numPoweron + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 243 + + + Number of virtual machine power off operations + numPoweroff + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 244 + + + 
Number of virtual machine suspend operations + numSuspend + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 245 + + + Number of virtual machine reset operations + numReset + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 246 + + + Number of virtual machine guest reboot operations + numRebootGuest + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 247 + + + Number of virtual machine standby guest operations + numStandbyGuest + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 248 + + + Number of virtual machine guest shutdown operations + numShutdownGuest + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 249 + + + Number of virtual machine create operations + numCreate + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 250 + + + Number of virtual machine delete operations + numDestroy + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 251 + + + Number of virtual machine register operations + numRegister + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 252 + + + Number of virtual machine unregister operations + numUnregister + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 253 + + + Number of virtual machine reconfigure operations + numReconfigure + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 254 + + + Number of virtual machine clone operations + numClone + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 255 + + + Number of virtual machine template deploy operations + numDeploy + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 256 + + + Number of host change operations for powered-off and suspended VMs + numChangeHost + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 257 + + + Number of datastore change operations for powered-off and suspended virtual machines + numChangeDS + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 258 + + + Number of host and datastore change operations for powered-off and suspended virtual machines + numChangeHostDS + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 259 + + + Number of migrations with vMotion (host change operations for powered-on VMs) + numVMotion + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 260 + + + Number of migrations with Storage vMotion (datastore change operations for powered-on VMs) + numSVMotion + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 261 + + + Number of host and datastore change operations for powered-on and suspended virtual machines + numXVMotion + + + + Virtual machine operations + vmop + + + + Number + number + + latest + absolute + 1 + 3 + + + 262 + + + Total available CPU resources of all hosts within a cluster + effectivecpu + + + + Cluster services + clusterServices + + + + Megahertz + megaHertz + + average + rate + 1 + 3 + + + 263 + + + Total 
amount of machine memory of all hosts in the cluster that is available for use for virtual machine memory and overhead memory + effectivemem + + + + Cluster services + clusterServices + + + + Megabyte + megaBytes + + average + absolute + 1 + 3 + + + 264 + + + Total amount of CPU resources of all hosts in the cluster + totalmhz + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + rate + 1 + 3 + + + 265 + + + Total amount of host physical memory of all hosts in the cluster that is available for virtual machine memory (physical memory for use by the guest OS) and virtual machine overhead memory + totalmb + + + + Memory + mem + + + + Megabyte + megaBytes + + average + absolute + 1 + 3 + + + 266 + + + DRS score of the cluster + clusterDrsScore + + + + Cluster services + clusterServices + + + + Percentage + percent + + latest + absolute + 1 + 1 + + + 267 + + + vSphere HA number of failures that can be tolerated + failover + + + + Cluster services + clusterServices + + + + Number + number + + latest + absolute + 1 + 3 + + + 268 + + + Amount of space actually used by the virtual machine or the datastore + used + + + + Disk + disk + + + + Kilobyte + kiloBytes + + latest + absolute + 1 + 1 + + + 269 + + + Amount of storage set aside for use by a datastore or a virtual machine + provisioned + + + + Disk + disk + + + + Kilobyte + kiloBytes + + latest + absolute + 1 + 1 + + + 270 + + + Configured size of the datastore + capacity + + + + Disk + disk + + + + Kilobyte + kiloBytes + + latest + absolute + 1 + 3 + + + 271 + + + Amount of space associated exclusively with a virtual machine + unshared + + + + Disk + disk + + + + Kilobyte + kiloBytes + + latest + absolute + 1 + 1 + + + 272 + + + Storage overhead of a virtual machine or a datastore due to delta disk backings + deltaused + + + + Disk + disk + + + + Kilobyte + kiloBytes + + latest + absolute + 2 + 3 + + + 273 + + + provisioned + capacity.provisioned + + + + Disk + disk + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 274 + + + usage + capacity.usage + + + + Disk + disk + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 275 + + + contention + capacity.contention + + + + Disk + disk + + + + Percentage + percent + + average + absolute + 4 + 4 + + + 276 + + + The latency of an activation operation in vCenter Server + activationlatencystats + + + + vCenter debugging information + vcDebugInfo + + + + Millisecond + millisecond + + maximum + absolute + 4 + 4 + + + 277 + + + The latency of an activation operation in vCenter Server + activationlatencystats + + + + vCenter debugging information + vcDebugInfo + + + + Millisecond + millisecond + + minimum + absolute + 4 + 4 + + + 278 + + + The latency of an activation operation in vCenter Server + activationlatencystats + + + + vCenter debugging information + vcDebugInfo + + + + Millisecond + millisecond + + summation + absolute + 1 + 1 + + + 279 + + + Activation operations in vCenter Server + activationstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 280 + + + Activation operations in vCenter Server + activationstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 281 + + + Activation operations in vCenter Server + activationstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 282 + + + buffersz + buffersz + + + + vCenter resource usage information + vcResources + + + + 
Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 283 + + + cachesz + cachesz + + + + vCenter resource usage information + vcResources + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 284 + + + Number of context switches per second on the system where vCenter Server is running + ctxswitchesrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 1 + 1 + + + 285 + + + diskreadsectorrate + diskreadsectorrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 4 + 4 + + + 286 + + + Number of disk reads per second on the system where vCenter Server is running + diskreadsrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 1 + 1 + + + 287 + + + diskwritesectorrate + diskwritesectorrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 4 + 4 + + + 288 + + + Number of disk writes per second on the system where vCenter Server is running + diskwritesrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 1 + 1 + + + 289 + + + The latency of a host sync operation in vCenter Server + hostsynclatencystats + + + + vCenter debugging information + vcDebugInfo + + + + Millisecond + millisecond + + maximum + absolute + 4 + 4 + + + 290 + + + The latency of a host sync operation in vCenter Server + hostsynclatencystats + + + + vCenter debugging information + vcDebugInfo + + + + Millisecond + millisecond + + minimum + absolute + 4 + 4 + + + 291 + + + The latency of a host sync operation in vCenter Server + hostsynclatencystats + + + + vCenter debugging information + vcDebugInfo + + + + Millisecond + millisecond + + summation + absolute + 1 + 1 + + + 292 + + + The number of host sync operations in vCenter Server + hostsyncstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 293 + + + The number of host sync operations in vCenter Server + hostsyncstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 294 + + + The number of host sync operations in vCenter Server + hostsyncstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 295 + + + vCenter Server inventory statistics + inventorystats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 296 + + + vCenter Server inventory statistics + inventorystats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 297 + + + vCenter Server inventory statistics + inventorystats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 298 + + + vCenter Server locking statistics + lockstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 299 + + + vCenter Server locking statistics + lockstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 300 + + + vCenter Server locking statistics + lockstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 301 + + + vCenter Server LRO statistics + lrostats + + + + vCenter debugging information + 
vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 302 + + + vCenter Server LRO statistics + lrostats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 303 + + + vCenter Server LRO statistics + lrostats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 304 + + + Miscellaneous statistics + miscstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 305 + + + Miscellaneous statistics + miscstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 306 + + + Miscellaneous statistics + miscstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 307 + + + Managed object reference counts in vCenter Server + morefregstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 308 + + + Managed object reference counts in vCenter Server + morefregstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 309 + + + Managed object reference counts in vCenter Server + morefregstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 310 + + + Rate of the number of total packets received per second on the system where vCenter Server is running + packetrecvrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 1 + 1 + + + 311 + + + Number of total packets sent per second on the system where vCenter Server is running + packetsentrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 1 + 1 + + + 312 + + + Total system CPU used on the system where vCenter Server in running + systemcpuusage + + + + vCenter resource usage information + vcResources + + + + Percentage + percent + + average + rate + 1 + 1 + + + 313 + + + Number of page faults per second on the system where vCenter Server is running + pagefaultrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 1 + 1 + + + 314 + + + Physical memory used by vCenter + physicalmemusage + + + + vCenter resource usage information + vcResources + + + + Kilobyte + kiloBytes + + average + absolute + 1 + 1 + + + 315 + + + CPU used by vCenter Server in privileged mode + priviledgedcpuusage + + + + vCenter resource usage information + vcResources + + + + Percentage + percent + + average + rate + 1 + 1 + + + 316 + + + Object counts in vCenter Server + scoreboard + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 317 + + + Object counts in vCenter Server + scoreboard + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 318 + + + Object counts in vCenter Server + scoreboard + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 3 + 3 + + + 319 + + + The statistics of client sessions connected to vCenter Server + sessionstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 320 + + + The statistics of client sessions connected to vCenter Server + sessionstats + + + + vCenter 
debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 321 + + + The statistics of client sessions connected to vCenter Server + sessionstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 322 + + + Number of systems calls made per second on the system where vCenter Server is running + syscallsrate + + + + vCenter resource usage information + vcResources + + + + Number + number + + average + rate + 1 + 1 + + + 323 + + + The statistics of vCenter Server as a running system such as thread statistics and heap statistics + systemstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 324 + + + The statistics of vCenter Server as a running system such as thread statistics and heap statistics + systemstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 325 + + + The statistics of vCenter Server as a running system such as thread statistics and heap statistics + systemstats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 326 + + + CPU used by vCenter Server in user mode + usercpuusage + + + + vCenter resource usage information + vcResources + + + + Percentage + percent + + average + rate + 1 + 1 + + + 327 + + + vCenter service statistics such as events, alarms, and tasks + vcservicestats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + maximum + absolute + 4 + 4 + + + 328 + + + vCenter service statistics such as events, alarms, and tasks + vcservicestats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + minimum + absolute + 4 + 4 + + + 329 + + + vCenter service statistics such as events, alarms, and tasks + vcservicestats + + + + vCenter debugging information + vcDebugInfo + + + + Number + number + + summation + absolute + 1 + 1 + + + 330 + + + Virtual memory used by vCenter Server + virtualmemusage + + + + vCenter resource usage information + vcResources + + + + Kilobyte + kiloBytes + + average + absolute + 1 + 1 + + + 331 + + + Average number of outstanding read requests to the virtual disk during the collection interval + readOIO + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 2 + 2 + + + 332 + + + Average number of outstanding write requests to the virtual disk during the collection interval + writeOIO + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 2 + 2 + + + 333 + + + Storage DRS virtual disk metric for the read workload model + readLoadMetric + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 2 + 2 + + + 334 + + + Storage DRS virtual disk metric for the write workload model + writeLoadMetric + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 2 + 2 + + + 335 + + + CPU active average over 1 minute + actav1 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 336 + + + Storage DRS datastore bytes read + datastoreReadBytes + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 2 + 2 + + + 337 + + + Storage DRS datastore bytes written + datastoreWriteBytes + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 2 + 2 + + + 338 + + + Storage DRS datastore read I/O rate + datastoreReadIops + + + + Datastore + 
datastore + + + + Number + number + + latest + absolute + 1 + 3 + + + 339 + + + Storage DRS datastore write I/O rate + datastoreWriteIops + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 1 + 3 + + + 340 + + + Storage DRS datastore outstanding read requests + datastoreReadOIO + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 1 + 3 + + + 341 + + + Storage DRS datastore outstanding write requests + datastoreWriteOIO + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 1 + 3 + + + 342 + + + Storage DRS datastore normalized read latency + datastoreNormalReadLatency + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 2 + 2 + + + 343 + + + Storage DRS datastore normalized write latency + datastoreNormalWriteLatency + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 2 + 2 + + + 344 + + + Storage DRS datastore metric for read workload model + datastoreReadLoadMetric + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 4 + 4 + + + 345 + + + Storage DRS datastore metric for write workload model + datastoreWriteLoadMetric + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 4 + 4 + + + 346 + + + The average datastore latency as seen by virtual machines + datastoreVMObservedLatency + + + + Datastore + datastore + + + + Microsecond + microsecond + + latest + absolute + 1 + 3 + + + 401 + + + Number of SCSI reservation conflicts for the LUN as a percent of total commands during the collection interval + scsiReservationCnflctsPct + + + + Disk + disk + + + + Percentage + percent + + average + rate + 4 + 4 + + + 402 + + + The number of I/Os that have been issued but have not yet completed + outstandingIOs + + + + Storage adapter + storageAdapter + + + + Number + number + + latest + absolute + 4 + 4 + + + 403 + + + The current number of I/Os that are waiting to be issued + queued + + + + Storage adapter + storageAdapter + + + + Number + number + + latest + absolute + 4 + 4 + + + 404 + + + The maximum number of I/Os that can be outstanding at a given time + queueDepth + + + + Storage adapter + storageAdapter + + + + Number + number + + latest + absolute + 4 + 4 + + + 405 + + + CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading) + utilization + + + + CPU + cpu + + + + Percentage + percent + + average + rate + 2 + 3 + + + 406 + + + The number of virtual processors provisioned to the entity + corecount.provisioned + + + + CPU + cpu + + + + Number + number + + latest + absolute + 4 + 4 + + + 407 + + + The amount of L3 cache the VM uses + cache.l3.occupancy + + + + CPU + cpu + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 408 + + + The number of virtual processors running on the host + corecount.usage + + + + CPU + cpu + + + + Number + number + + latest + absolute + 4 + 4 + + + 409 + + + CPU load average over the past 1 minute, sampled on every 6 seconds + load.avg1min + + + + CPU + cpu + + + + Percentage + percent + + latest + absolute + 4 + 4 + + + 410 + + + CPU load average over the past 5 minutes, sampled on every 6 seconds + load.avg5min + + + + CPU + cpu + + + + Percentage + percent + + latest + absolute + 4 + 4 + + + 411 + + + CPU load average over the past 15 minutes, sampled on every 6 seconds + load.avg15min + + + + CPU + cpu + + + + Percentage + percent + + latest + absolute + 4 + 4 + + + 412 + + + 
Total amount of memory available to the host + capacity.provisioned + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 413 + + + Percent of memory that has been reserved either through VMkernel use, by userworlds or due to VM memory reservations + reservedCapacityPct + + + + Memory + mem + + + + Percentage + percent + + latest + absolute + 4 + 4 + + + 414 + + + Ratio of total requested memory and the managed memory minus 1 over the past 1 minute + overcommit.avg1min + + + + Memory + mem + + + + Number + number + + latest + absolute + 4 + 4 + + + 415 + + + Ratio of total requested memory and the managed memory minus 1 over the past 5 minutes + overcommit.avg5min + + + + Memory + mem + + + + Number + number + + latest + absolute + 4 + 4 + + + 416 + + + Ratio of total requested memory and the managed memory minus 1 over the past 15 minutes + overcommit.avg15min + + + + Memory + mem + + + + Number + number + + latest + absolute + 4 + 4 + + + 417 + + + Total amount of machine memory on the ESXi host + physical.total + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 418 + + + Amount of machine memory being used by everything other than VMkernel + physical.user + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 419 + + + Amount of machine memory that is free on the ESXi host + physical.free + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 420 + + + Total amount of machine memory managed by VMkernel + kernel.managed + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 421 + + + Mininum amount of machine memory that VMkernel likes to keep free + kernel.minfree + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 422 + + + Amount of machine memory that is currently unreserved + kernel.unreserved + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 423 + + + Amount of physical memory that is being shared + pshare.shared + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 424 + + + Amount of machine memory that is common across World(s) + pshare.common + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 425 + + + Amount of machine memory saved due to page-sharing + pshare.sharedSave + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 426 + + + Current swap usage + swap.current + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 427 + + + Where ESXi expects the reclaimed memory using swapping and compression to be + swap.target + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 428 + + + Rate at which memory is swapped in by ESXi from disk + swap.readrate + + + + Memory + mem + + + + Megabytes per second + megaBytesPerSecond + + average + rate + 4 + 4 + + + 429 + + + Rate at which memory is swapped to disk by the ESXi + swap.writerate + + + + Memory + mem + + + + Megabytes per second + megaBytesPerSecond + + average + rate + 4 + 4 + + + 430 + + + Total compressed physical memory + zip.zipped + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 431 + + + Saved memory by compression + zip.saved + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 432 + + + Total amount of physical memory reclaimed using the vmmemctl modules + 
memctl.current + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 433 + + + Total amount of physical memory ESXi would like to reclaim using the vmmemctl modules + memctl.target + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 434 + + + Maximum amount of physical memory ESXi can reclaim using the vmmemctl modules + memctl.max + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 455 + + + CPU time spent waiting for swap-in + swapwait + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 3 + 3 + + + 456 + + + CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading) + utilization + + + + CPU + cpu + + + + Percentage + percent + + none + rate + 4 + 4 + + + 457 + + + CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading) + utilization + + + + CPU + cpu + + + + Percentage + percent + + maximum + rate + 4 + 4 + + + 458 + + + CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading) + utilization + + + + CPU + cpu + + + + Percentage + percent + + minimum + rate + 4 + 4 + + + 459 + + + CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized) + coreUtilization + + + + CPU + cpu + + + + Percentage + percent + + none + rate + 4 + 4 + + + 460 + + + CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized) + coreUtilization + + + + CPU + cpu + + + + Percentage + percent + + average + rate + 2 + 3 + + + 461 + + + CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized) + coreUtilization + + + + CPU + cpu + + + + Percentage + percent + + maximum + rate + 4 + 4 + + + 462 + + + CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized) + coreUtilization + + + + CPU + cpu + + + + Percentage + percent + + minimum + rate + 4 + 4 + + + 463 + + + Total CPU capacity reserved by and available for virtual machines + totalCapacity + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + absolute + 2 + 3 + + + 464 + + + Percent of time the virtual machine is unable to run because it is contending for access to the physical CPU(s) + latency + + + + CPU + cpu + + + + Percentage + percent + + average + rate + 2 + 3 + + + 465 + + + CPU resources devoted by the ESX scheduler + entitlement + + + + CPU + cpu + + + + Megahertz + megaHertz + + latest + absolute + 2 + 3 + + + 466 + + + The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limit + demand + + + + CPU + cpu + + + + Megahertz + megaHertz + + average + absolute + 2 + 3 + + + 467 + + + Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraints + costop + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 2 + 3 + + + 468 + + + Time the virtual 
machine is ready to run, but is not run due to maxing out its CPU limit setting + maxlimited + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 2 + 3 + + + 469 + + + Time the virtual machine was interrupted to perform system services on behalf of itself or other virtual machines + overlap + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 3 + 3 + + + 470 + + + Time the virtual machine is scheduled to run + run + + + + CPU + cpu + + + + Millisecond + millisecond + + summation + delta + 2 + 3 + + + 471 + + + CPU resource entitlement to CPU demand ratio (in percents) + demandEntitlementRatio + + + + CPU + cpu + + + + Percentage + percent + + latest + absolute + 4 + 4 + + + 472 + + + Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU + readiness + + + + CPU + cpu + + + + Percentage + percent + + average + rate + 4 + 4 + + + 473 + + + Virtual CPU usage as a percentage during the interval + usage.vcpus + + + + CPU + cpu + + + + Percentage + percent + + average + rate + 4 + 4 + + + 474 + + + Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter + swapin + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 475 + + + Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter + swapin + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 476 + + + Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter + swapin + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 477 + + + Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter + swapin + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 478 + + + Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on. + swapout + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 479 + + + Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on. + swapout + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 480 + + + Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on. + swapout + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 481 + + + Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on. 
+ swapout + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 482 + + + Amount of host physical memory consumed by VMkernel + sysUsage + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 483 + + + Amount of host physical memory consumed by VMkernel + sysUsage + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 484 + + + Amount of host physical memory consumed by VMkernel + sysUsage + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 485 + + + Amount of host physical memory consumed by VMkernel + sysUsage + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 486 + + + Amount of guest physical memory that is being actively written by guest. Activeness is estimated by ESXi + activewrite + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 487 + + + Host physical memory reserved by ESXi, for its data structures, for running the virtual machine + overheadMax + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 488 + + + Total reservation, available and consumed, for powered-on virtual machines + totalCapacity + + + + Memory + mem + + + + Megabyte + megaBytes + + average + absolute + 2 + 3 + + + 489 + + + Amount of guest physical memory pages compressed by ESXi + zipped + + + + Memory + mem + + + + Kilobyte + kiloBytes + + latest + absolute + 2 + 3 + + + 490 + + + Host physical memory, reclaimed from a virtual machine, by memory compression. This value is less than the value of 'Compressed' memory + zipSaved + + + + Memory + mem + + + + Kilobyte + kiloBytes + + latest + absolute + 2 + 3 + + + 491 + + + Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memory + latency + + + + Memory + mem + + + + Percentage + percent + + average + absolute + 2 + 3 + + + 492 + + + Amount of host physical memory the virtual machine deserves, as determined by ESXi + entitlement + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 493 + + + Threshold of free host physical memory below which ESXi will begin actively reclaiming memory from virtual machines by swapping, compression and ballooning + lowfreethreshold + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 2 + 3 + + + 494 + + + Storage space consumed on the host swap cache for storing swapped guest physical memory pages + llSwapUsed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 495 + + + Rate at which guest physical memory is swapped in from the host swap cache + llSwapInRate + + + + Memory + mem + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 496 + + + Rate at which guest physical memory is swapped out to the host swap cache + llSwapOutRate + + + + Memory + mem + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 497 + + + Estimate of the host physical memory, from Overhead consumed, that is actively read or written to by ESXi + overheadTouched + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 498 + + + Storage space consumed on the host swap cache for storing swapped guest physical memory pages + llSwapUsed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 499 + + + Storage space consumed on the host swap cache for storing swapped guest physical memory 
pages + llSwapUsed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 500 + + + Storage space consumed on the host swap cache for storing swapped guest physical memory pages + llSwapUsed + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 501 + + + Amount of guest physical memory swapped in from host cache + llSwapIn + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 502 + + + Amount of guest physical memory swapped in from host cache + llSwapIn + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 503 + + + Amount of guest physical memory swapped in from host cache + llSwapIn + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 504 + + + Amount of guest physical memory swapped in from host cache + llSwapIn + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 505 + + + Amount of guest physical memory swapped out to the host swap cache + llSwapOut + + + + Memory + mem + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 506 + + + Amount of guest physical memory swapped out to the host swap cache + llSwapOut + + + + Memory + mem + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 507 + + + Amount of guest physical memory swapped out to the host swap cache + llSwapOut + + + + Memory + mem + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 508 + + + Amount of guest physical memory swapped out to the host swap cache + llSwapOut + + + + Memory + mem + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 509 + + + Space used for holding VMFS Pointer Blocks in memory + vmfs.pbc.size + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 510 + + + Maximum size the VMFS Pointer Block Cache can grow to + vmfs.pbc.sizeMax + + + + Memory + mem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 511 + + + Amount of file blocks whose addresses are cached in the VMFS PB Cache + vmfs.pbc.workingSet + + + + Memory + mem + + + + Terabyte + teraBytes + + latest + absolute + 4 + 4 + + + 512 + + + Maximum amount of file blocks whose addresses are cached in the VMFS PB Cache + vmfs.pbc.workingSetMax + + + + Memory + mem + + + + Terabyte + teraBytes + + latest + absolute + 4 + 4 + + + 513 + + + Amount of VMFS heap used by the VMFS PB Cache + vmfs.pbc.overhead + + + + Memory + mem + + + + Kilobyte + kiloBytes + + latest + absolute + 4 + 4 + + + 514 + + + Trailing average of the ratio of capacity misses to compulsory misses for the VMFS PB Cache + vmfs.pbc.capMissRatio + + + + Memory + mem + + + + Percentage + percent + + latest + absolute + 4 + 4 + + + 515 + + + Number of SCSI commands issued during the collection interval + commands + + + + Disk + disk + + + + Number + number + + summation + delta + 2 + 3 + + + 516 + + + Average amount of time, in milliseconds, to read from the physical device + deviceReadLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 517 + + + Average amount of time, in milliseconds, spent by VMkernel to process each SCSI read command + kernelReadLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 518 + + + Average amount of time taken during the collection interval to process a SCSI read command issued from the guest OS to the virtual machine + totalReadLatency + + + + Disk + disk + + + + Millisecond + 
millisecond + + average + absolute + 2 + 3 + + + 519 + + + Average amount of time spent in the VMkernel queue, per SCSI read command, during the collection interval + queueReadLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 520 + + + Average amount of time, in milliseconds, to write to the physical device + deviceWriteLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 521 + + + Average amount of time, in milliseconds, spent by VMkernel to process each SCSI write command + kernelWriteLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 522 + + + Average amount of time taken during the collection interval to process a SCSI write command issued by the guest OS to the virtual machine + totalWriteLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 523 + + + Average amount of time spent in the VMkernel queue, per SCSI write command, during the collection interval + queueWriteLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 524 + + + Average amount of time, in milliseconds, to complete a SCSI command from the physical device + deviceLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 1 + 3 + + + 525 + + + Average amount of time, in milliseconds, spent by VMkernel to process each SCSI command + kernelLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 526 + + + Average amount of time spent in the VMkernel queue, per SCSI command, during the collection interval + queueLatency + + + + Disk + disk + + + + Millisecond + millisecond + + average + absolute + 2 + 3 + + + 527 + + + Maximum queue depth + maxQueueDepth + + + + Disk + disk + + + + Number + number + + average + absolute + 1 + 3 + + + 528 + + + Average number of SCSI commands issued per second during the collection interval + commandsAveraged + + + + Disk + disk + + + + Number + number + + average + rate + 2 + 3 + + + 529 + + + Number of receives dropped + droppedRx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 530 + + + Number of transmits dropped + droppedTx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 531 + + + Average amount of data received per second + bytesRx + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 532 + + + Average amount of data transmitted per second + bytesTx + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 2 + 3 + + + 533 + + + Number of broadcast packets received during the sampling interval + broadcastRx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 534 + + + Number of broadcast packets transmitted during the sampling interval + broadcastTx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 535 + + + Number of multicast packets received during the sampling interval + multicastRx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 536 + + + Number of multicast packets transmitted during the sampling interval + multicastTx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 537 + + + Number of packets with errors received during the sampling interval + errorsRx + + + + Network + net + + + + Number + number + + 
summation + delta + 2 + 3 + + + 538 + + + Number of packets with errors transmitted during the sampling interval + errorsTx + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 539 + + + Number of frames with unknown protocol received during the sampling interval + unknownProtos + + + + Network + net + + + + Number + number + + summation + delta + 2 + 3 + + + 540 + + + pnicBytesRx + pnicBytesRx + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 541 + + + pnicBytesTx + pnicBytesTx + + + + Network + net + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 542 + + + Number of heartbeats issued per virtual machine during the interval + heartbeat + + + + System + sys + + + + Number + number + + latest + absolute + 4 + 4 + + + 543 + + + Amount of disk space usage for each mount point + diskUsage + + + + System + sys + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 544 + + + Amount of CPU used by the Service Console and other applications during the interval + resourceCpuUsage + + + + System + sys + + + + Megahertz + megaHertz + + none + rate + 4 + 4 + + + 545 + + + Amount of CPU used by the Service Console and other applications during the interval + resourceCpuUsage + + + + System + sys + + + + Megahertz + megaHertz + + average + rate + 3 + 3 + + + 546 + + + Amount of CPU used by the Service Console and other applications during the interval + resourceCpuUsage + + + + System + sys + + + + Megahertz + megaHertz + + maximum + rate + 4 + 4 + + + 547 + + + Amount of CPU used by the Service Console and other applications during the interval + resourceCpuUsage + + + + System + sys + + + + Megahertz + megaHertz + + minimum + rate + 4 + 4 + + + 548 + + + Memory touched by the system resource group + resourceMemTouched + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 549 + + + Memory mapped by the system resource group + resourceMemMapped + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 550 + + + Memory saved due to sharing by the system resource group + resourceMemShared + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 551 + + + Memory swapped out by the system resource group + resourceMemSwapped + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 552 + + + Overhead memory consumed by the system resource group + resourceMemOverhead + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 553 + + + Memory shared by the system resource group + resourceMemCow + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 554 + + + Zero filled memory used by the system resource group + resourceMemZero + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 555 + + + CPU running average over 1 minute of the system resource group + resourceCpuRun1 + + + + System + sys + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 556 + + + CPU active average over 1 minute of the system resource group + resourceCpuAct1 + + + + System + sys + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 557 + + + CPU maximum limited over 1 minute of the system resource group + resourceCpuMaxLimited1 + + + + System + sys + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 558 + + + CPU running average over 5 minutes of the system resource group + 
resourceCpuRun5 + + + + System + sys + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 559 + + + CPU active average over 5 minutes of the system resource group + resourceCpuAct5 + + + + System + sys + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 560 + + + CPU maximum limited over 5 minutes of the system resource group + resourceCpuMaxLimited5 + + + + System + sys + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 561 + + + CPU allocation reservation (in MHz) of the system resource group + resourceCpuAllocMin + + + + System + sys + + + + Megahertz + megaHertz + + latest + absolute + 3 + 3 + + + 562 + + + CPU allocation limit (in MHz) of the system resource group + resourceCpuAllocMax + + + + System + sys + + + + Megahertz + megaHertz + + latest + absolute + 3 + 3 + + + 563 + + + CPU allocation shares of the system resource group + resourceCpuAllocShares + + + + System + sys + + + + Number + number + + latest + absolute + 3 + 3 + + + 564 + + + Memory allocation reservation (in KB) of the system resource group + resourceMemAllocMin + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 565 + + + Memory allocation limit (in KB) of the system resource group + resourceMemAllocMax + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 3 + 3 + + + 566 + + + Memory allocation shares of the system resource group + resourceMemAllocShares + + + + System + sys + + + + Number + number + + latest + absolute + 3 + 3 + + + 567 + + + Total time elapsed, in seconds, since last operating system boot-up + osUptime + + + + System + sys + + + + Second + second + + latest + absolute + 4 + 4 + + + 568 + + + Memory consumed by the system resource group + resourceMemConsumed + + + + System + sys + + + + Kilobyte + kiloBytes + + latest + absolute + 4 + 4 + + + 569 + + + Number of file descriptors used by the system resource group + resourceFdUsage + + + + System + sys + + + + Number + number + + latest + absolute + 4 + 4 + + + 570 + + + CPU active peak over 1 minute + actpk1 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 571 + + + CPU running average over 1 minute + runav1 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 572 + + + CPU active average over 5 minutes + actav5 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 573 + + + CPU active peak over 5 minutes + actpk5 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 574 + + + CPU running average over 5 minutes + runav5 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 575 + + + CPU active average over 15 minutes + actav15 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 576 + + + CPU active peak over 15 minutes + actpk15 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 577 + + + CPU running average over 15 minutes + runav15 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 578 + + + CPU running peak over 1 minute + runpk1 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 579 + + + Amount of CPU resources over the limit that were refused, average over 1 minute + maxLimited1 + + + + Resource group CPU + rescpu + + 
+ + Percentage + percent + + latest + absolute + 3 + 3 + + + 580 + + + CPU running peak over 5 minutes + runpk5 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 581 + + + Amount of CPU resources over the limit that were refused, average over 5 minutes + maxLimited5 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 582 + + + CPU running peak over 15 minutes + runpk15 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 583 + + + Amount of CPU resources over the limit that were refused, average over 15 minutes + maxLimited15 + + + + Resource group CPU + rescpu + + + + Percentage + percent + + latest + absolute + 3 + 3 + + + 584 + + + Group CPU sample count + sampleCount + + + + Resource group CPU + rescpu + + + + Number + number + + latest + absolute + 3 + 3 + + + 585 + + + Group CPU sample period + samplePeriod + + + + Resource group CPU + rescpu + + + + Millisecond + millisecond + + latest + absolute + 3 + 3 + + + 586 + + + Amount of total configured memory that is available for use + memUsed + + + + Management agent + managementAgent + + + + Kilobyte + kiloBytes + + average + absolute + 3 + 3 + + + 587 + + + Sum of the memory swapped by all powered-on virtual machines on the host + swapUsed + + + + Management agent + managementAgent + + + + Kilobyte + kiloBytes + + average + absolute + 3 + 3 + + + 588 + + + Amount of Service Console CPU usage + cpuUsage + + + + Management agent + managementAgent + + + + Megahertz + megaHertz + + average + rate + 3 + 3 + + + 589 + + + Average number of commands issued per second on the storage path during the collection interval + commandsAveraged + + + + Storage path + storagePath + + + + Number + number + + average + rate + 3 + 3 + + + 590 + + + Average number of read commands issued per second on the storage path during the collection interval + numberReadAveraged + + + + Storage path + storagePath + + + + Number + number + + average + rate + 3 + 3 + + + 591 + + + Average number of write commands issued per second on the storage path during the collection interval + numberWriteAveraged + + + + Storage path + storagePath + + + + Number + number + + average + rate + 3 + 3 + + + 592 + + + Rate of reading data on the storage path + read + + + + Storage path + storagePath + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 593 + + + Rate of writing data on the storage path + write + + + + Storage path + storagePath + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 3 + 3 + + + 594 + + + The average time a read issued on the storage path takes + totalReadLatency + + + + Storage path + storagePath + + + + Millisecond + millisecond + + average + absolute + 3 + 3 + + + 595 + + + The average time a write issued on the storage path takes + totalWriteLatency + + + + Storage path + storagePath + + + + Millisecond + millisecond + + average + absolute + 3 + 3 + + + 596 + + + Average read request size in bytes + readIOSize + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 4 + 4 + + + 597 + + + Average write request size in bytes + writeIOSize + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 4 + 4 + + + 598 + + + Number of seeks during the interval that were less than 64 LBNs apart + smallSeeks + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 4 + 4 + + + 599 + + + Number 
of seeks during the interval that were between 64 and 8192 LBNs apart + mediumSeeks + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 4 + 4 + + + 600 + + + Number of seeks during the interval that were greater than 8192 LBNs apart + largeSeeks + + + + Virtual disk + virtualDisk + + + + Number + number + + latest + absolute + 4 + 4 + + + 601 + + + Read latency in microseconds + readLatencyUS + + + + Virtual disk + virtualDisk + + + + Microsecond + microsecond + + latest + absolute + 4 + 4 + + + 602 + + + Write latency in microseconds + writeLatencyUS + + + + Virtual disk + virtualDisk + + + + Microsecond + microsecond + + latest + absolute + 4 + 4 + + + 603 + + + Storage I/O Control datastore maximum queue depth + datastoreMaxQueueDepth + + + + Datastore + datastore + + + + Number + number + + latest + absolute + 1 + 3 + + + 604 + + + unmapSize + unmapSize + + + + Datastore + datastore + + + + Megabyte + megaBytes + + summation + delta + 4 + 4 + + + 605 + + + unmapIOs + unmapIOs + + + + Datastore + datastore + + + + Number + number + + summation + delta + 4 + 4 + + + 606 + + + Current number of replicated virtual machines + hbrNumVms + + + + vSphere Replication + hbr + + + + Number + number + + average + absolute + 4 + 4 + + + 607 + + + Average amount of data received per second + hbrNetRx + + + + vSphere Replication + hbr + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 608 + + + Average amount of data transmitted per second + hbrNetTx + + + + vSphere Replication + hbr + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 609 + + + Number of caches controlled by the virtual flash module + numActiveVMDKs + + + + Virtual flash module related statistical values + vflashModule + + + + Number + number + + latest + absolute + 4 + 4 + + + 610 + + + Read IOPS + readIops + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Number + number + + average + rate + 4 + 4 + + + 611 + + + Read throughput in kBps + readThroughput + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 612 + + + Average read latency in ms + readAvgLatency + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Millisecond + millisecond + + average + absolute + 4 + 4 + + + 613 + + + Max read latency in ms + readMaxLatency + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Millisecond + millisecond + + latest + absolute + 4 + 4 + + + 614 + + + Cache hit rate percentage + readCacheHitRate + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Percentage + percent + + latest + absolute + 4 + 4 + + + 615 + + + Read congestion + readCongestion + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Number + number + + average + rate + 4 + 4 + + + 616 + + + Write IOPS + writeIops + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Number + number + + average + rate + 4 + 4 + + + 617 + + + Write throughput in kBps + writeThroughput + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 618 + + + Average write latency in ms + writeAvgLatency + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Millisecond + millisecond + + average + absolute + 4 + 4 + + + 619 + + + Max write latency in ms + writeMaxLatency + + + + vSAN DOM object 
related statistical values + vsanDomObj + + + + Millisecond + millisecond + + latest + absolute + 4 + 4 + + + 620 + + + Write congestion + writeCongestion + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Number + number + + average + rate + 4 + 4 + + + 621 + + + Recovery write IOPS + recoveryWriteIops + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Number + number + + average + rate + 4 + 4 + + + 622 + + + Recovery write through-put in kBps + recoveryWriteThroughput + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Kilobytes per second + kiloBytesPerSecond + + average + rate + 4 + 4 + + + 623 + + + Average recovery write latency in ms + recoveryWriteAvgLatency + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Millisecond + millisecond + + average + absolute + 4 + 4 + + + 624 + + + Max recovery write latency in ms + recoveryWriteMaxLatency + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Millisecond + millisecond + + latest + absolute + 4 + 4 + + + 625 + + + Recovery write congestion + recoveryWriteCongestion + + + + vSAN DOM object related statistical values + vsanDomObj + + + + Number + number + + average + rate + 4 + 4 + + + 626 + + + The utilization of a GPU in percentages + utilization + + + + GPU + gpu + + + + Percentage + percent + + none + absolute + 4 + 4 + + + 627 + + + The utilization of a GPU in percentages + utilization + + + + GPU + gpu + + + + Percentage + percent + + average + absolute + 4 + 4 + + + 628 + + + The utilization of a GPU in percentages + utilization + + + + GPU + gpu + + + + Percentage + percent + + maximum + absolute + 4 + 4 + + + 629 + + + The utilization of a GPU in percentages + utilization + + + + GPU + gpu + + + + Percentage + percent + + minimum + absolute + 4 + 4 + + + 630 + + + The amount of GPU memory used in kilobytes + mem.used + + + + GPU + gpu + + + + Kilobyte + kiloBytes + + none + absolute + 4 + 4 + + + 631 + + + The amount of GPU memory used in kilobytes + mem.used + + + + GPU + gpu + + + + Kilobyte + kiloBytes + + average + absolute + 4 + 4 + + + 632 + + + The amount of GPU memory used in kilobytes + mem.used + + + + GPU + gpu + + + + Kilobyte + kiloBytes + + maximum + absolute + 4 + 4 + + + 633 + + + The amount of GPU memory used in kilobytes + mem.used + + + + GPU + gpu + + + + Kilobyte + kiloBytes + + minimum + absolute + 4 + 4 + + + 634 + + + The amount of GPU memory used in percentages of the total available + mem.usage + + + + GPU + gpu + + + + Percentage + percent + + none + absolute + 4 + 4 + + + 635 + + + The amount of GPU memory used in percentages of the total available + mem.usage + + + + GPU + gpu + + + + Percentage + percent + + average + absolute + 4 + 4 + + + 636 + + + The amount of GPU memory used in percentages of the total available + mem.usage + + + + GPU + gpu + + + + Percentage + percent + + maximum + absolute + 4 + 4 + + + 637 + + + The amount of GPU memory used in percentages of the total available + mem.usage + + + + GPU + gpu + + + + Percentage + percent + + minimum + absolute + 4 + 4 + + + 638 + + + The temperature of a GPU in degrees celsius + temperature + + + + GPU + gpu + + + + Temperature in degrees Celsius + celsius + + average + absolute + 4 + 4 + + + 639 + + + Persistent memory available reservation on a host. + available.reservation + + + + PMEM + pmem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 640 + + + Persistent memory reservation managed by DRS on a host. 
+ drsmanaged.reservation + + + + PMEM + pmem + + + + Megabyte + megaBytes + + latest + absolute + 4 + 4 + + + 701 + + + Memory reservation health state, 2->Red, 1->Green + health.reservationState + + + + Memory + mem + + + + Number + number + + latest + absolute + 4 + 4 + + + + - + \ No newline at end of file diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/resource-pool-default-properties.xml b/receiver/vcenterreceiver/internal/mockserver/responses/resource-pool-default-properties.xml index 6af66d19e133..653a2b954e71 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/resource-pool-default-properties.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/resource-pool-default-properties.xml @@ -4,274 +4,280 @@ xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> - + - resgroup-9 - - name - Resources - - - owner - domain-c8 - - - summary - - Resources - - resgroup-9 - - 252846 - true - 252846 - - 4000 - normal - - - - 2140385 - true - 2140385 - - 163840 - normal - - - - - - 930037628928 - 189414768640 - 1314275721216 - 1314275721216 - 210757484544 - 2244313350144 - - - 101439 - 18000 - 151407 - 151407 - 13791 - 252846 - - green - disabled - - - 13791 - 14440 - 40543 - 200994 - 13636 - 185343 - 150394 - 239546 - 199617 - 1 - 0 - 0 - 2191 - 1378 - 0 - - 236544 - - - - vm - - vm-1040 - - + + resgroup-9 + + name + Resources + + + owner + domain-c8 + + + summary + + Resources + + resgroup-9 + + 252846 + true + 252846 + + 4000 + normal + + + + 2140385 + true + 2140385 + + 163840 + normal + + + + + + 930037628928 + 189414768640 + 1314275721216 + 1314275721216 + 210757484544 + 2244313350144 + + + 101439 + 18000 + 151407 + 151407 + 13791 + 252846 + + green + disabled + + + 13791 + 14440 + 40543 + 200994 + 13636 + 185343 + 150394 + 239546 + 199617 + 1 + 0 + 0 + 2191 + 1378 + 0 + + 236544 + + + + vm + + vm-1040 + + + - resgroup-10 - - name - Resources - - - owner - domain-c9 - - - summary - - Resources - - resgroup-9 - - 252846 - true - 252846 - - 4000 - normal - - - - 2140385 - true - 2140385 - - 163840 - normal - - - - - - 930037628928 - 189414768640 - 1314275721216 - 1314275721216 - 210757484544 - 2244313350144 - - - 101439 - 18000 - 151407 - 151407 - 13791 - 252846 - - green - disabled - - - 13791 - 14440 - 40543 - 200994 - 13636 - 185343 - 150394 - 239546 - 199617 - 1 - 0 - 0 - 2191 - 1378 - 0 - - 236544 - - - - vm - - vm-6005 - - + + resgroup-10 + + name + Resources + + + owner + domain-c9 + + + summary + + Resources + + resgroup-9 + + 252846 + true + 252846 + + 4000 + normal + + + + 2140385 + true + 2140385 + + 163840 + normal + + + + + + 930037628928 + 189414768640 + 1314275721216 + 1314275721216 + 210757484544 + 2244313350144 + + + 101439 + 18000 + 151407 + 151407 + 13791 + 252846 + + green + disabled + + + 13791 + 14440 + 40543 + 200994 + 13636 + 185343 + 150394 + 239546 + 199617 + 1 + 0 + 0 + 2191 + 1378 + 0 + + 236544 + + + + vm + + vm-6005 + + + - resgroup-v10 - - name - v-app-1 - - - owner - domain-c8 - - - summary - - - - resgroup-v10 - - 0 - true - 1 - - 4000 - normal - - - - 0 - true - 1 - - 163840 - normal - - 0 - - disabled - - - - 930037628928 - 189414768640 - 1314275721216 - 1314275721216 - 210757484544 - 2244313350144 - - - 0 - 0 - 151407 - 151407 - 0 - 252846 - - green - disabled - - - 0 - 0 - 40543 - 200994 - 0 - 185343 - 150394 - 239546 - 199617 - 0 - 0 - 0 - 2191 - 1378 - 0 - - 236544 - - 0 - - + + resgroup-v10 + + name + v-app-1 + + + owner + domain-c8 + + + summary + - - - - - - - - 
started - false - false - 50219291-3ced-8caa-239a-79f84675881a - - - - vm - - vm-6004 - - + + resgroup-v10 + + 0 + true + 1 + + 4000 + normal + + + + 0 + true + 1 + + 163840 + normal + + 0 + + disabled + + + + 930037628928 + 189414768640 + 1314275721216 + 1314275721216 + 210757484544 + 2244313350144 + + + 0 + 0 + 151407 + 151407 + 0 + 252846 + + green + disabled + + + 0 + 0 + 40543 + 200994 + 0 + 185343 + 150394 + 239546 + 199617 + 0 + 0 + 0 + 2191 + 1378 + 0 + + 236544 + + 0 + + + + + + + + + + + started + false + false + 50219291-3ced-8caa-239a-79f84675881a + + + + vm + + vm-6004 + + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/retrieve-properties-empty.xml b/receiver/vcenterreceiver/internal/mockserver/responses/retrieve-properties-empty.xml index 26226f9bb581..a25a3c74c799 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/retrieve-properties-empty.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/retrieve-properties-empty.xml @@ -1,6 +1,6 @@ - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/vm-default-properties.xml b/receiver/vcenterreceiver/internal/mockserver/responses/vm-default-properties.xml index 59c5e1ccbddd..e46edc6fd3eb 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/vm-default-properties.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/vm-default-properties.xml @@ -1,271 +1,279 @@ - + - vm-1040 - - name - CentOS 7 - - - config.hardware.numCPU - 4 - - - config.instanceUuid - 5000bbe0-993e-5813-c56a-198eaa62fb61 - - - config.template - false - - - resourcePool - resgroup-9 - - - runtime.maxCpuUsage - 10372 - - - runtime.powerState - poweredOn - - - summary.config.memorySizeMB - 16384 - - - summary.quickStats.balloonedMemory - 0 - - - summary.quickStats.guestMemoryUsage - 163 - - - summary.quickStats.overallCpuUsage - 12 - - - summary.quickStats.ssdSwappedMemory - 0 - - - summary.quickStats.swappedMemory - 0 - - - summary.runtime.host - host-1002 - - - summary.storage.committed - 16311648256 - - - summary.storage.uncommitted - 258847277056 - + + vm-1040 + + name + CentOS 7 + + + config.hardware.numCPU + 4 + + + config.instanceUuid + 5000bbe0-993e-5813-c56a-198eaa62fb61 + + + config.template + false + + + resourcePool + resgroup-9 + + + runtime.maxCpuUsage + 10372 + + + runtime.powerState + poweredOn + + + summary.config.memorySizeMB + 16384 + + + summary.quickStats.balloonedMemory + 0 + + + summary.quickStats.guestMemoryUsage + 163 + + + summary.quickStats.overallCpuUsage + 12 + + + summary.quickStats.ssdSwappedMemory + 0 + + + summary.quickStats.swappedMemory + 0 + + + summary.runtime.host + host-1002 + + + summary.storage.committed + 16311648256 + + + summary.storage.uncommitted + 258847277056 + + - vm-6004 - - name - CentOS 8 - - - config.hardware.numCPU - 4 - - - config.instanceUuid - 5000bbe0-993e-5813-c56a-198eaa62fb62 - - - config.template - false - - - resourcePool - resgroup-v10 - - - runtime.maxCpuUsage - 10372 - - - runtime.powerState - poweredOn - - - summary.config.memorySizeMB - 16384 - - - summary.quickStats.balloonedMemory - 0 - - - summary.quickStats.guestMemoryUsage - 163 - - - summary.quickStats.overallCpuUsage - 12 - - - summary.quickStats.ssdSwappedMemory - 0 - - - summary.quickStats.swappedMemory - 0 - - - summary.runtime.host - host-1002 - - - summary.storage.committed - 16311648256 - - - summary.storage.uncommitted - 258847277056 - + + vm-6004 + + name + CentOS 8 + + + config.hardware.numCPU + 4 + + + config.instanceUuid + 
5000bbe0-993e-5813-c56a-198eaa62fb62 + + + config.template + false + + + resourcePool + resgroup-v10 + + + runtime.maxCpuUsage + 10372 + + + runtime.powerState + poweredOn + + + summary.config.memorySizeMB + 16384 + + + summary.quickStats.balloonedMemory + 0 + + + summary.quickStats.guestMemoryUsage + 163 + + + summary.quickStats.overallCpuUsage + 12 + + + summary.quickStats.ssdSwappedMemory + 0 + + + summary.quickStats.swappedMemory + 0 + + + summary.runtime.host + host-1002 + + + summary.storage.committed + 16311648256 + + + summary.storage.uncommitted + 258847277056 + + - vm-6005 - - name - CentOS 9 - - - config.hardware.numCPU - 4 - - - config.instanceUuid - 5000bbe0-993e-5813-c56a-198eaa62fb63 - - - config.template - false - - - resourcePool - resgroup-10 - - - runtime.maxCpuUsage - 10372 - - - runtime.powerState - poweredOn - - - summary.config.memorySizeMB - 16384 - - - summary.quickStats.balloonedMemory - 0 - - - summary.quickStats.guestMemoryUsage - 163 - - - summary.quickStats.overallCpuUsage - 12 - - - summary.quickStats.ssdSwappedMemory - 0 - - - summary.quickStats.swappedMemory - 0 - - - summary.runtime.host - host-1003 - - - summary.storage.committed - 16311648256 - - - summary.storage.uncommitted - 258847277056 - + + vm-6005 + + name + CentOS 9 + + + config.hardware.numCPU + 4 + + + config.instanceUuid + 5000bbe0-993e-5813-c56a-198eaa62fb63 + + + config.template + false + + + resourcePool + resgroup-10 + + + runtime.maxCpuUsage + 10372 + + + runtime.powerState + poweredOn + + + summary.config.memorySizeMB + 16384 + + + summary.quickStats.balloonedMemory + 0 + + + summary.quickStats.guestMemoryUsage + 163 + + + summary.quickStats.overallCpuUsage + 12 + + + summary.quickStats.ssdSwappedMemory + 0 + + + summary.quickStats.swappedMemory + 0 + + + summary.runtime.host + host-1003 + + + summary.storage.committed + 16311648256 + + + summary.storage.uncommitted + 258847277056 + + - vm-template - - name - CentOS 7 Template - - - config.hardware.numCPU - 4 - - - config.instanceUuid - 5000bbe0-993e-5813-c56a-198eaa62fb64 - - - config.template - true - - - runtime.maxCpuUsage - 10372 - - - runtime.powerState - poweredOn - - - summary.config.memorySizeMB - 16384 - - - summary.quickStats.balloonedMemory - 0 - - - summary.quickStats.guestMemoryUsage - 163 - - - summary.quickStats.overallCpuUsage - 12 - - - summary.quickStats.ssdSwappedMemory - 0 - - - summary.quickStats.swappedMemory - 0 - - - summary.runtime.host - host-1002 - - - summary.storage.committed - 16311648256 - - - summary.storage.uncommitted - 258847277056 - + + vm-template + + name + CentOS 7 Template + + + config.hardware.numCPU + 4 + + + config.instanceUuid + 5000bbe0-993e-5813-c56a-198eaa62fb64 + + + config.template + true + + + runtime.maxCpuUsage + 10372 + + + runtime.powerState + poweredOn + + + summary.config.memorySizeMB + 16384 + + + summary.quickStats.balloonedMemory + 0 + + + summary.quickStats.guestMemoryUsage + 163 + + + summary.quickStats.overallCpuUsage + 12 + + + summary.quickStats.ssdSwappedMemory + 0 + + + summary.quickStats.swappedMemory + 0 + + + summary.runtime.host + host-1002 + + + summary.storage.committed + 16311648256 + + + summary.storage.uncommitted + 258847277056 + + - + diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/vm-folder-children.xml b/receiver/vcenterreceiver/internal/mockserver/responses/vm-folder-children.xml index 18a90c00f038..5e4b8e718afb 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/vm-folder-children.xml +++ 
b/receiver/vcenterreceiver/internal/mockserver/responses/vm-folder-children.xml @@ -1,58 +1,68 @@ - + - resgroup-v10 - - name - v-app-1 - + + resgroup-v10 + + name + v-app-1 + + - vm-1040 - - name - CentOS 7 - + + vm-1040 + + name + CentOS 7 + + - vm-template - - name - CentOS 7 Template - + + vm-template + + name + CentOS 7 Template + + - group-v1034 - - childType - - Folder - VirtualMachine - VirtualApp - - - - name - HCX Management VMs - + + group-v1034 + + childType + + Folder + VirtualMachine + VirtualApp + + + + name + HCX Management VMs + + - group-v1001 - - childType - - Folder - VirtualMachine - VirtualApp - - - - name - Discovered virtual machine - + + group-v1001 + + childType + + Folder + VirtualMachine + VirtualApp + + + + name + Discovered virtual machine + + - + \ No newline at end of file diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/vm-folder-parents.xml b/receiver/vcenterreceiver/internal/mockserver/responses/vm-folder-parents.xml index 1d683e80ef9f..e6b75c85ba48 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/vm-folder-parents.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/vm-folder-parents.xml @@ -1,36 +1,42 @@ - + - group-v4 - - name - vm - - - parent - datacenter-3 - + + group-v4 + + name + vm + + + parent + datacenter-3 + + - datacenter-3 - - name - Datacenter - - - parent - group-d1 - + + datacenter-3 + + name + Datacenter + + + parent + group-d1 + + - group-d1 - - name - Datacenters - + + group-d1 + + name + Datacenters + + - + From e004ecd31d5c4963c4e2e97fee348b74304ab161 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Fri, 28 Jun 2024 09:14:33 -0700 Subject: [PATCH 2/9] Faster json parser (#33785) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Description:** Switch libraries to reduce the footprint of the JSON parser. 
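The description above names the goal (a smaller JSON parser footprint) but not the replacement library. As a rough, hedged sketch of what such a swap looks like in a stanza-style JSON parser's hot path (one `Unmarshal` per log entry into a generic map), the snippet below assumes `github.com/goccy/go-json` as the drop-in library and uses a hypothetical `parseBody` helper; neither is copied from the actual patch.

```go
// Illustrative sketch only: the import and helper below are assumptions,
// not lines from this PR. It shows the shape of a per-entry JSON parse where
// swapping the library behind one import changes the allocation profile
// without touching the operator's public behavior.
package main

import (
	"fmt"

	json "github.com/goccy/go-json" // assumed replacement; same Marshal/Unmarshal API as encoding/json
)

// parseBody mimics a stanza JSON parser's hot path: accept the raw log body
// (string or bytes) and decode it into a generic map for downstream operators.
func parseBody(value any) (map[string]any, error) {
	parsed := make(map[string]any)
	switch v := value.(type) {
	case string:
		if err := json.Unmarshal([]byte(v), &parsed); err != nil {
			return nil, err
		}
	case []byte:
		if err := json.Unmarshal(v, &parsed); err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("type %T cannot be parsed as JSON", value)
	}
	return parsed, nil
}

func main() {
	out, err := parseBody(`{"level":"info","msg":"hello","count":3}`)
	fmt.Println(out, err)
}
```

Because the assumed library exposes the same `Unmarshal(data []byte, v any) error` signature as `encoding/json`, the operator's configuration surface and output shape can stay unchanged; only the per-entry CPU and allocation cost moves, which is what the benchmark results under **Testing** measure.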
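The before/after numbers under **Testing** come from repeated runs of the package's `BenchmarkProcess` benchmark, compared with `benchstat`. The following is a simplified, self-contained sketch of that kind of benchmark; it is not the repository's actual test code, which builds full stanza entries and operators.

```go
// benchmark_sketch_test.go: illustrative benchmark in the spirit of
// BenchmarkProcess. The payload and decoding path are simplified assumptions.
package main

import (
	"testing"

	json "github.com/goccy/go-json" // assumed replacement library, as in the sketch above
)

var body = []byte(`{"method":"GET","path":"/health","status":200,"duration_ms":1.42}`)

func BenchmarkProcess(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		out := make(map[string]any)
		if err := json.Unmarshal(body, &out); err != nil {
			b.Fatal(err)
		}
	}
}
```

Running `go test -bench=Process -benchmem -count=10` before and after the dependency change, saving each run to `before.txt` and `after.txt`, and passing both files to `benchstat` yields comparison tables like the ones shown below.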
**Link to tracking Issue:** Fixes #33784 **Testing:** Before: ``` goos: darwin goarch: arm64 pkg: github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json BenchmarkProcess-10 49047 23951 ns/op 26277 B/op 177 allocs/op BenchmarkProcess-10 50248 24002 ns/op 26275 B/op 177 allocs/op BenchmarkProcess-10 50258 24517 ns/op 26276 B/op 177 allocs/op BenchmarkProcess-10 50505 24731 ns/op 26276 B/op 177 allocs/op BenchmarkProcess-10 45708 24730 ns/op 26276 B/op 177 allocs/op BenchmarkProcess-10 50022 25021 ns/op 26277 B/op 177 allocs/op BenchmarkProcess-10 47204 24794 ns/op 26275 B/op 177 allocs/op BenchmarkProcess-10 47742 25335 ns/op 26274 B/op 177 allocs/op BenchmarkProcess-10 46252 25205 ns/op 26276 B/op 177 allocs/op BenchmarkProcess-10 47916 24379 ns/op 26277 B/op 177 allocs/op PASS ok github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json 15.559s ``` After: ``` goos: darwin goarch: arm64 pkg: github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json BenchmarkProcess-10 95613 12168 ns/op 14702 B/op 115 allocs/op BenchmarkProcess-10 97904 12343 ns/op 14704 B/op 115 allocs/op BenchmarkProcess-10 99006 12187 ns/op 14702 B/op 115 allocs/op BenchmarkProcess-10 96964 12310 ns/op 14703 B/op 115 allocs/op BenchmarkProcess-10 98661 12285 ns/op 14703 B/op 115 allocs/op BenchmarkProcess-10 96896 12356 ns/op 14703 B/op 115 allocs/op BenchmarkProcess-10 94118 12367 ns/op 14703 B/op 115 allocs/op BenchmarkProcess-10 96123 12349 ns/op 14702 B/op 115 allocs/op BenchmarkProcess-10 96948 12305 ns/op 14702 B/op 115 allocs/op BenchmarkProcess-10 96626 12225 ns/op 14702 B/op 115 allocs/op PASS ok github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json 13.619s ``` Benchstat: ``` goos: darwin goarch: arm64 pkg: github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json │ before.txt │ after.txt │ │ sec/op │ sec/op vs base │ Process-10 24.73µ ± 3% 12.31µ ± 1% -50.23% (p=0.000 n=10) │ before.txt │ after.txt │ │ B/op │ B/op vs base │ Process-10 25.66Ki ± 0% 14.36Ki ± 0% -44.05% (p=0.000 n=10) │ before.txt │ after.txt │ │ allocs/op │ allocs/op vs base │ Process-10 177.0 ± 0% 115.0 ± 0% -35.03% (p=0.000 n=10) ``` --- .chloggen/faster_json_parser.yaml | 27 +++++++++++ cmd/otelcontribcol/go.mod | 2 +- cmd/otelcontribcol/go.sum | 4 +- cmd/oteltestbedcol/go.mod | 1 + cmd/oteltestbedcol/go.sum | 2 + connector/datadogconnector/go.mod | 2 +- connector/datadogconnector/go.sum | 4 +- exporter/datadogexporter/go.mod | 2 +- exporter/datadogexporter/go.sum | 4 +- .../datadogexporter/integrationtest/go.mod | 2 +- .../datadogexporter/integrationtest/go.sum | 4 +- .../integrationtest/go.mod | 1 + .../integrationtest/go.sum | 2 + pkg/stanza/go.mod | 1 + pkg/stanza/go.sum | 2 + pkg/stanza/operator/parser/json/config.go | 2 - pkg/stanza/operator/parser/json/parser.go | 5 +-- .../operator/parser/json/parser_test.go | 24 +++++++++- .../parser/json/testdata/testdata.json | 45 +++++++++++++++++++ processor/logstransformprocessor/go.mod | 1 + processor/logstransformprocessor/go.sum | 2 + receiver/azureeventhubreceiver/go.mod | 1 + receiver/azureeventhubreceiver/go.sum | 2 + receiver/filelogreceiver/go.mod | 1 + receiver/filelogreceiver/go.sum | 2 + receiver/journaldreceiver/go.mod | 1 + receiver/journaldreceiver/go.sum | 2 + receiver/mongodbatlasreceiver/go.mod | 1 + receiver/mongodbatlasreceiver/go.sum | 2 + receiver/namedpipereceiver/go.mod | 1 + receiver/namedpipereceiver/go.sum 
| 2 + receiver/otlpjsonfilereceiver/go.mod | 1 + receiver/otlpjsonfilereceiver/go.sum | 2 + receiver/sqlqueryreceiver/go.mod | 2 +- receiver/sqlqueryreceiver/go.sum | 4 +- receiver/syslogreceiver/go.mod | 1 + receiver/syslogreceiver/go.sum | 2 + receiver/tcplogreceiver/go.mod | 1 + receiver/tcplogreceiver/go.sum | 2 + receiver/udplogreceiver/go.mod | 1 + receiver/udplogreceiver/go.sum | 2 + receiver/windowseventlogreceiver/go.mod | 1 + receiver/windowseventlogreceiver/go.sum | 2 + testbed/go.mod | 1 + testbed/go.sum | 2 + 45 files changed, 157 insertions(+), 21 deletions(-) create mode 100644 .chloggen/faster_json_parser.yaml create mode 100644 pkg/stanza/operator/parser/json/testdata/testdata.json diff --git a/.chloggen/faster_json_parser.yaml b/.chloggen/faster_json_parser.yaml new file mode 100644 index 000000000000..5a5fabbed7e6 --- /dev/null +++ b/.chloggen/faster_json_parser.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/stanza + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Switch JSON parser used by json_parser to github.com/goccy/go-json + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33784] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index db2cf0c9fd17..8d4f659ac027 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -483,7 +483,7 @@ require ( github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gocql/gocql v1.6.0 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index 9c18546ab5ef..fb26b551853c 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -1378,8 +1378,8 @@ github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gocql/gocql v1.6.0 h1:IdFdOTbnpbd0pDhl4REKQDM+Q0SzKXQ1Yh+YZZ8T/qU= github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= diff --git a/cmd/oteltestbedcol/go.mod b/cmd/oteltestbedcol/go.mod index 8c9a2590539f..53472087460f 100644 --- a/cmd/oteltestbedcol/go.mod +++ b/cmd/oteltestbedcol/go.mod @@ -107,6 +107,7 @@ require ( github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect diff --git a/cmd/oteltestbedcol/go.sum b/cmd/oteltestbedcol/go.sum index 2e757f149e74..b8466641e983 100644 --- a/cmd/oteltestbedcol/go.sum +++ b/cmd/oteltestbedcol/go.sum @@ -214,6 +214,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= diff --git a/connector/datadogconnector/go.mod b/connector/datadogconnector/go.mod index cf722f1d73b9..15a0ecd9a3db 100644 --- a/connector/datadogconnector/go.mod +++ b/connector/datadogconnector/go.mod @@ -126,7 +126,7 @@ require ( github.com/go-openapi/swag v0.22.9 // indirect 
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect diff --git a/connector/datadogconnector/go.sum b/connector/datadogconnector/go.sum index 776b99a3f6f0..4c4cd8d820b5 100644 --- a/connector/datadogconnector/go.sum +++ b/connector/datadogconnector/go.sum @@ -391,8 +391,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index 1aeaeec1850c..c4bd37458474 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -175,7 +175,7 @@ require ( github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect diff --git a/exporter/datadogexporter/go.sum b/exporter/datadogexporter/go.sum index 183e7c362a68..7f6dfceb549d 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -428,8 +428,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= diff --git a/exporter/datadogexporter/integrationtest/go.mod b/exporter/datadogexporter/integrationtest/go.mod index 6e37de50e94b..5e4a8649eabf 100644 --- a/exporter/datadogexporter/integrationtest/go.mod +++ b/exporter/datadogexporter/integrationtest/go.mod @@ -126,7 +126,7 @@ 
require ( github.com/go-openapi/swag v0.22.9 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect diff --git a/exporter/datadogexporter/integrationtest/go.sum b/exporter/datadogexporter/integrationtest/go.sum index 776b99a3f6f0..4c4cd8d820b5 100644 --- a/exporter/datadogexporter/integrationtest/go.sum +++ b/exporter/datadogexporter/integrationtest/go.sum @@ -391,8 +391,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= diff --git a/exporter/elasticsearchexporter/integrationtest/go.mod b/exporter/elasticsearchexporter/integrationtest/go.mod index 1714a554e0ce..2188c792d0a7 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.mod +++ b/exporter/elasticsearchexporter/integrationtest/go.mod @@ -52,6 +52,7 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect diff --git a/exporter/elasticsearchexporter/integrationtest/go.sum b/exporter/elasticsearchexporter/integrationtest/go.sum index ffe9a0f5c3c1..a6e12b03311e 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.sum +++ b/exporter/elasticsearchexporter/integrationtest/go.sum @@ -64,6 +64,8 @@ github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsM github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= diff --git a/pkg/stanza/go.mod b/pkg/stanza/go.mod index d2657774f683..af4783605a94 100644 --- a/pkg/stanza/go.mod +++ b/pkg/stanza/go.mod @@ -7,6 +7,7 @@ require ( 
github.com/cespare/xxhash/v2 v2.3.0 github.com/expr-lang/expr v1.16.9 github.com/fsnotify/fsnotify v1.7.0 + github.com/goccy/go-json v0.10.3 github.com/jpillora/backoff v1.0.0 github.com/json-iterator/go v1.1.12 github.com/leodido/go-syslog/v4 v4.1.0 diff --git a/pkg/stanza/go.sum b/pkg/stanza/go.sum index fe2e3c28cd7f..097c5c2ce831 100644 --- a/pkg/stanza/go.sum +++ b/pkg/stanza/go.sum @@ -20,6 +20,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/pkg/stanza/operator/parser/json/config.go b/pkg/stanza/operator/parser/json/config.go index 4bf7c0087144..bb45edb402b9 100644 --- a/pkg/stanza/operator/parser/json/config.go +++ b/pkg/stanza/operator/parser/json/config.go @@ -4,7 +4,6 @@ package json // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json" import ( - jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" @@ -43,6 +42,5 @@ func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error return &Parser{ ParserOperator: parserOperator, - json: jsoniter.ConfigFastest, }, nil } diff --git a/pkg/stanza/operator/parser/json/parser.go b/pkg/stanza/operator/parser/json/parser.go index 429a49c6ecca..7d626f97a007 100644 --- a/pkg/stanza/operator/parser/json/parser.go +++ b/pkg/stanza/operator/parser/json/parser.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - jsoniter "github.com/json-iterator/go" + "github.com/goccy/go-json" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" @@ -16,7 +16,6 @@ import ( // Parser is an operator that parses JSON. type Parser struct { helper.ParserOperator - json jsoniter.API } // Process will parse an entry for JSON. 
@@ -29,7 +28,7 @@ func (p *Parser) parse(value any) (any, error) { var parsedValue map[string]any switch m := value.(type) { case string: - err := p.json.UnmarshalFromString(m, &parsedValue) + err := json.Unmarshal([]byte(m), &parsedValue) if err != nil { return nil, err } diff --git a/pkg/stanza/operator/parser/json/parser_test.go b/pkg/stanza/operator/parser/json/parser_test.go index a018055e2c74..f27d1fdd8113 100644 --- a/pkg/stanza/operator/parser/json/parser_test.go +++ b/pkg/stanza/operator/parser/json/parser_test.go @@ -5,6 +5,8 @@ package json import ( "context" + "os" + "path/filepath" "testing" "time" @@ -46,7 +48,7 @@ func TestParserStringFailure(t *testing.T) { parser := newTestParser(t) _, err := parser.parse("invalid") require.Error(t, err) - require.Contains(t, err.Error(), "error found in #1 byte") + require.Contains(t, err.Error(), "expected { character for map value") } func TestParserByteFailure(t *testing.T) { @@ -164,3 +166,23 @@ func TestParser(t *testing.T) { }) } } + +func BenchmarkProcess(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + cfg := NewConfig() + + parser, err := cfg.Build(componenttest.NewNopTelemetrySettings()) + require.NoError(b, err) + + body, err := os.ReadFile(filepath.Join("testdata", "testdata.json")) + require.NoError(b, err) + + e := entry.Entry{Body: string(body)} + + for i := 0; i < b.N; i++ { + err := parser.Process(context.Background(), &e) + require.NoError(b, err) + } +} diff --git a/pkg/stanza/operator/parser/json/testdata/testdata.json b/pkg/stanza/operator/parser/json/testdata/testdata.json new file mode 100644 index 000000000000..1d2868d4994d --- /dev/null +++ b/pkg/stanza/operator/parser/json/testdata/testdata.json @@ -0,0 +1,45 @@ +{ + "_id": "667cb0db02f4dfc7648b0f6b", + "index": 0, + "guid": "2e419732-8214-4e36-a158-d3ced0217ab6", + "isActive": true, + "balance": "$1,105.05", + "picture": "http://example.com/1", + "age": 22, + "eyeColor": "blue", + "name": "Vincent Knox", + "gender": "male", + "company": "ANIVET", + "email": "vincentknox@anivet.com", + "phone": "+1 (914) 599-2454", + "address": "483 Gerritsen Avenue, Succasunna, Massachusetts, 7803", + "about": "Elit aliqua qui amet duis esse eiusmod cillum proident quis amet elit tempor dolor exercitation. Eu ut tempor exercitation excepteur est. Lorem ad elit sit reprehenderit quis ad sunt laborum amet veniam commodo sit sunt aliqua. Sint incididunt eu ut est magna amet mollit qui deserunt nostrud labore ad. Nostrud officia proident occaecat et irure ut quis culpa mollit veniam. Laboris labore ea reprehenderit veniam mollit enim et proident ipsum id. In qui sit officia laborum.\r\nIn ad consectetur duis ad nisi proident. Non in officia do mollit amet sint voluptate minim nostrud voluptate elit. Veniam Lorem cillum fugiat adipisicing qui ea commodo irure tempor ipsum pariatur sit voluptate. Eiusmod cillum occaecat excepteur cillum aliquip laboris velit aute proident amet.\r\nIpsum sunt eiusmod do ut voluptate sit anim. Consequat nisi nisi consequat amet excepteur ea ad incididunt pariatur veniam exercitation eu ex in. Incididunt sint tempor pariatur Lorem do. Occaecat laborum ad ad id enim dolor deserunt ipsum amet Lorem Lorem. Cillum veniam labore eu do duis.\r\nCillum dolor eiusmod sit amet commodo voluptate pariatur ex irure eu culpa sunt. Incididunt non exercitation est pariatur est. Incididunt mollit Lorem velit ullamco excepteur esse quis id magna et ullamco labore. Laboris consequat tempor est ea amet enim et nisi amet officia dolore magna veniam. 
Nostrud officia consectetur ea culpa laborum et ut Lorem laboris.\r\nDeserunt labore ullamco dolor exercitation laboris consectetur nulla cupidatat duis. Occaecat quis velit deserunt culpa nostrud eiusmod elit fugiat nulla duis deserunt Lorem do. Proident anim proident aute amet pariatur et do irure. Ad magna qui elit consequat sit exercitation sit. Magna adipisicing id esse aliqua officia magna. Et veniam aliqua minim reprehenderit in culpa. Adipisicing quis eu do Lorem cupidatat consequat ad aute quis.\r\nIn aliquip ea laborum esse dolor reprehenderit qui sit culpa occaecat. Consectetur Lorem dolore adipisicing amet incididunt. Dolor veniam Lorem nulla ex. Eiusmod amet tempor sit eiusmod do reprehenderit proident sit commodo elit cupidatat.\r\nNulla nulla consequat cillum mollit tempor eiusmod irure deserunt amet et voluptate. Fugiat et veniam culpa eiusmod minim ex pariatur. Eiusmod adipisicing pariatur pariatur adipisicing in consequat cillum ut qui veniam amet incididunt ullamco anim.\r\nDolor nulla laborum tempor adipisicing qui id. Exercitation labore aliqua ut laborum velit cupidatat officia. Est qui dolor sint laboris aliqua ea nulla culpa.\r\nAute reprehenderit nulla elit nisi reprehenderit pariatur officia veniam dolore ea occaecat nostrud sunt fugiat. Cillum consequat labore nostrud veniam nisi ea proident est officia incididunt adipisicing qui sint nisi. Ad enim reprehenderit minim labore minim irure dolor. Voluptate commodo dolor excepteur est tempor dolor sunt esse fugiat ea eu et.\r\nIpsum sit velit deserunt aliqua eu labore ad esse eu. Duis eiusmod non exercitation consequat nulla. Enim elit consectetur pariatur sunt labore sunt dolore non do. Sint consequat aliqua tempor consectetur veniam minim. Veniam eu aute occaecat consectetur dolore ullamco dolore officia.\r\n", + "registered": "2023-06-08T12:29:06 +07:00", + "latitude": -59.802339, + "longitude": -160.473187, + "tags": [ + "pariatur", + "anim", + "id", + "duis", + "fugiat", + "qui", + "veniam" + ], + "friends": [ + { + "id": 0, + "name": "Hester Bruce" + }, + { + "id": 1, + "name": "Laurel Mcknight" + }, + { + "id": 2, + "name": "Wynn Moses" + } + ], + "greeting": "Hello, Vincent Knox! 
You have 1 unread messages.", + "favoriteFruit": "apple" +} \ No newline at end of file diff --git a/processor/logstransformprocessor/go.mod b/processor/logstransformprocessor/go.mod index aa49933280e8..9ed3649ebddd 100644 --- a/processor/logstransformprocessor/go.mod +++ b/processor/logstransformprocessor/go.mod @@ -29,6 +29,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/processor/logstransformprocessor/go.sum b/processor/logstransformprocessor/go.sum index d93a166e748e..d9e9dca8cf40 100644 --- a/processor/logstransformprocessor/go.sum +++ b/processor/logstransformprocessor/go.sum @@ -16,6 +16,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/azureeventhubreceiver/go.mod b/receiver/azureeventhubreceiver/go.mod index d994251514c4..bb40e5510041 100644 --- a/receiver/azureeventhubreceiver/go.mod +++ b/receiver/azureeventhubreceiver/go.mod @@ -46,6 +46,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.4.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect diff --git a/receiver/azureeventhubreceiver/go.sum b/receiver/azureeventhubreceiver/go.sum index 791a975366d6..6c05d71ee02d 100644 --- a/receiver/azureeventhubreceiver/go.sum +++ b/receiver/azureeventhubreceiver/go.sum @@ -70,6 +70,8 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= diff --git a/receiver/filelogreceiver/go.mod b/receiver/filelogreceiver/go.mod index e6bd586479f6..cf26875da1e4 100644 --- a/receiver/filelogreceiver/go.mod +++ 
b/receiver/filelogreceiver/go.mod @@ -28,6 +28,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/receiver/filelogreceiver/go.sum b/receiver/filelogreceiver/go.sum index fcf0d0d2de9d..450d1358162d 100644 --- a/receiver/filelogreceiver/go.sum +++ b/receiver/filelogreceiver/go.sum @@ -18,6 +18,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/journaldreceiver/go.mod b/receiver/journaldreceiver/go.mod index 9ba553dee65f..cbdffca4a9a0 100644 --- a/receiver/journaldreceiver/go.mod +++ b/receiver/journaldreceiver/go.mod @@ -25,6 +25,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/receiver/journaldreceiver/go.sum b/receiver/journaldreceiver/go.sum index b217f7323e25..2f220272bf09 100644 --- a/receiver/journaldreceiver/go.sum +++ b/receiver/journaldreceiver/go.sum @@ -16,6 +16,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/mongodbatlasreceiver/go.mod b/receiver/mongodbatlasreceiver/go.mod index bb46295c09cd..5d6ebb4a715e 100644 --- a/receiver/mongodbatlasreceiver/go.mod +++ b/receiver/mongodbatlasreceiver/go.mod @@ -39,6 +39,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/uuid v1.6.0 // indirect 
diff --git a/receiver/mongodbatlasreceiver/go.sum b/receiver/mongodbatlasreceiver/go.sum index 2fb790e32b97..c3b5fc9e8cf1 100644 --- a/receiver/mongodbatlasreceiver/go.sum +++ b/receiver/mongodbatlasreceiver/go.sum @@ -20,6 +20,8 @@ github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/namedpipereceiver/go.mod b/receiver/namedpipereceiver/go.mod index 754661ddea8e..4c645eebde7b 100644 --- a/receiver/namedpipereceiver/go.mod +++ b/receiver/namedpipereceiver/go.mod @@ -26,6 +26,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/receiver/namedpipereceiver/go.sum b/receiver/namedpipereceiver/go.sum index 209d6af5a7df..a1bfe2dc7940 100644 --- a/receiver/namedpipereceiver/go.sum +++ b/receiver/namedpipereceiver/go.sum @@ -18,6 +18,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/otlpjsonfilereceiver/go.mod b/receiver/otlpjsonfilereceiver/go.mod index ffb46084e9b0..cccf80ab9850 100644 --- a/receiver/otlpjsonfilereceiver/go.mod +++ b/receiver/otlpjsonfilereceiver/go.mod @@ -27,6 +27,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/receiver/otlpjsonfilereceiver/go.sum b/receiver/otlpjsonfilereceiver/go.sum index fcf0d0d2de9d..450d1358162d 100644 --- a/receiver/otlpjsonfilereceiver/go.sum +++ b/receiver/otlpjsonfilereceiver/go.sum @@ -18,6 +18,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/sqlqueryreceiver/go.mod b/receiver/sqlqueryreceiver/go.mod index d522d35f4702..f93237570d51 100644 --- a/receiver/sqlqueryreceiver/go.mod +++ b/receiver/sqlqueryreceiver/go.mod @@ -71,7 +71,7 @@ require ( github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sql-driver/mysql v1.8.1 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect diff --git a/receiver/sqlqueryreceiver/go.sum b/receiver/sqlqueryreceiver/go.sum index b1ad7fbe7bdb..8cefc8959937 100644 --- a/receiver/sqlqueryreceiver/go.sum +++ b/receiver/sqlqueryreceiver/go.sum @@ -122,8 +122,8 @@ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpv github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= diff --git a/receiver/syslogreceiver/go.mod b/receiver/syslogreceiver/go.mod index c2cee4f5cbfe..8e66991c8aab 100644 --- a/receiver/syslogreceiver/go.mod +++ b/receiver/syslogreceiver/go.mod @@ -27,6 +27,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/receiver/syslogreceiver/go.sum b/receiver/syslogreceiver/go.sum index 71e2a3394010..a2944c9759e3 100644 --- a/receiver/syslogreceiver/go.sum +++ b/receiver/syslogreceiver/go.sum @@ -18,6 +18,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 
h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/tcplogreceiver/go.mod b/receiver/tcplogreceiver/go.mod index 8078e7d178e9..b1a48f248fa9 100644 --- a/receiver/tcplogreceiver/go.mod +++ b/receiver/tcplogreceiver/go.mod @@ -25,6 +25,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/receiver/tcplogreceiver/go.sum b/receiver/tcplogreceiver/go.sum index 71e2a3394010..a2944c9759e3 100644 --- a/receiver/tcplogreceiver/go.sum +++ b/receiver/tcplogreceiver/go.sum @@ -18,6 +18,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/udplogreceiver/go.mod b/receiver/udplogreceiver/go.mod index a5f8b2594eb2..dfdec5eef9bb 100644 --- a/receiver/udplogreceiver/go.mod +++ b/receiver/udplogreceiver/go.mod @@ -24,6 +24,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/receiver/udplogreceiver/go.sum b/receiver/udplogreceiver/go.sum index b217f7323e25..2f220272bf09 100644 --- a/receiver/udplogreceiver/go.sum +++ b/receiver/udplogreceiver/go.sum @@ -16,6 +16,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf 
v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/receiver/windowseventlogreceiver/go.mod b/receiver/windowseventlogreceiver/go.mod index a3ff47c2fe75..f8536b4825d9 100644 --- a/receiver/windowseventlogreceiver/go.mod +++ b/receiver/windowseventlogreceiver/go.mod @@ -26,6 +26,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect diff --git a/receiver/windowseventlogreceiver/go.sum b/receiver/windowseventlogreceiver/go.sum index b217f7323e25..2f220272bf09 100644 --- a/receiver/windowseventlogreceiver/go.sum +++ b/receiver/windowseventlogreceiver/go.sum @@ -16,6 +16,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/testbed/go.mod b/testbed/go.mod index cba4199bcfb0..6352e5a81440 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -117,6 +117,7 @@ require ( github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect diff --git a/testbed/go.sum b/testbed/go.sum index fe535d0037f8..1044ba5d393f 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -208,6 +208,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= From 2813fe2f02979b53db828f0d5f45ebf5ed78e0e0 Mon Sep 17 00:00:00 2001 From: "James Hughes (Splunk)" Date: Fri, 28 Jun 2024 10:03:25 -0700 Subject: [PATCH 3/9] [chore] [receiver/receivercreator] fix bad copypasta in test (createErr -> err) (#33808) Chore/bugfix, goes from this ``` 
{"level":"error","ts":1719538225.8125398,"caller":"receivercreator@v0.103.0/observerhandler.go:159","msg":"failed to start receiver","kind":"receiver","receiver":"jmx/cassandra","error":"failed starting endpoint-derived receiver: %!w()", ``` to this ``` {"level":"error","ts":1719548614.7125309,"caller":"receivercreator@v0.103.0/observerhandler.go:159","msg":"failed to start receiver","kind":"receiver","receiver":"jmx/cassandra","error":"failed starting endpoint-derived receiver: failed to parse Endpoint \"172.20.0.2:7199\": parse \"172.20.0.2:7199\": first path segment in URL cannot contain colon" ``` (fixing the issue with [`url.Parse`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/jmxreceiver/receiver.go#L174) [error](https://github.com/golang/go/blob/master/src/net/url/url.go#L557) is another topic entirely, given the jmx code accepts it without the `jmx` prefix, but that's another issue...) --- receiver/receivercreator/runner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/receivercreator/runner.go b/receiver/receivercreator/runner.go index 7d8abafaf413..ab76676f66d6 100644 --- a/receiver/receivercreator/runner.go +++ b/receiver/receivercreator/runner.go @@ -108,7 +108,7 @@ func (run *receiverRunner) start( } if err = wr.Start(context.Background(), run.host); err != nil { - return nil, fmt.Errorf("failed starting endpoint-derived receiver: %w", createError) + return nil, fmt.Errorf("failed starting endpoint-derived receiver: %w", err) } return wr, nil From fb4ab3c8f00700826f073a522342e39a3cde8702 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Sat, 29 Jun 2024 23:48:43 -0700 Subject: [PATCH 4/9] [chore][receiver/awscontainerinsights] Enable goleak checks (#32404) **Description:** Enable `goleak` checks on the `cadvisor` package of the AWS Container Insights receiver to help ensure no goroutines are being leaked. **Link to tracking Issue:** #30438 **Testing:** This package is a linux-only package, so I wasn't able to test locally, but CI/CD tests are passing. 
Co-authored-by: Sean Marciniak <30928402+MovieStoreGuy@users.noreply.github.com> --- .../internal/cadvisor/package_test.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 receiver/awscontainerinsightreceiver/internal/cadvisor/package_test.go diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/package_test.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/package_test.go new file mode 100644 index 000000000000..8be0b28a2858 --- /dev/null +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/package_test.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package cadvisor + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} From cbc04f33a0c9ae34a82acf41805c0a1fa7ffdc07 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Mon, 1 Jul 2024 00:59:09 +0800 Subject: [PATCH 5/9] [chore] fix failed k8s e2e test bugs (#33818) **Description:** related #33813 **Link to tracking Issue:** **Testing:** **Documentation:** Signed-off-by: Ziqi Zhao --- processor/k8sattributesprocessor/e2e_test.go | 2 ++ receiver/k8sclusterreceiver/e2e_test.go | 2 ++ receiver/kubeletstatsreceiver/e2e_test.go | 2 ++ 3 files changed, 6 insertions(+) diff --git a/processor/k8sattributesprocessor/e2e_test.go b/processor/k8sattributesprocessor/e2e_test.go index 41802c81563e..4800516655b2 100644 --- a/processor/k8sattributesprocessor/e2e_test.go +++ b/processor/k8sattributesprocessor/e2e_test.go @@ -978,6 +978,8 @@ func resourceHasAttributes(resource pcommon.Resource, kvs map[string]*expectedVa func startUpSinks(t *testing.T, mc *consumertest.MetricsSink, tc *consumertest.TracesSink, lc *consumertest.LogsSink) func() { f := otlpreceiver.NewFactory() cfg := f.CreateDefaultConfig().(*otlpreceiver.Config) + cfg.HTTP = nil + cfg.GRPC.NetAddr.Endpoint = "0.0.0.0:4317" _, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopSettings(), cfg, mc) require.NoError(t, err, "failed creating metrics receiver") diff --git a/receiver/k8sclusterreceiver/e2e_test.go b/receiver/k8sclusterreceiver/e2e_test.go index 8e3418c61b11..a4e6af75dcd3 100644 --- a/receiver/k8sclusterreceiver/e2e_test.go +++ b/receiver/k8sclusterreceiver/e2e_test.go @@ -111,6 +111,8 @@ func TestE2E(t *testing.T) { func startUpSink(t *testing.T, mc *consumertest.MetricsSink) func() { f := otlpreceiver.NewFactory() cfg := f.CreateDefaultConfig().(*otlpreceiver.Config) + cfg.HTTP = nil + cfg.GRPC.NetAddr.Endpoint = "0.0.0.0:4317" rcvr, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopSettings(), cfg, mc) require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) diff --git a/receiver/kubeletstatsreceiver/e2e_test.go b/receiver/kubeletstatsreceiver/e2e_test.go index ba5ace29985b..24d0835e2b03 100644 --- a/receiver/kubeletstatsreceiver/e2e_test.go +++ b/receiver/kubeletstatsreceiver/e2e_test.go @@ -69,6 +69,8 @@ func TestE2E(t *testing.T) { func startUpSink(t *testing.T, mc *consumertest.MetricsSink) func() { f := otlpreceiver.NewFactory() cfg := f.CreateDefaultConfig().(*otlpreceiver.Config) + cfg.HTTP = nil + cfg.GRPC.NetAddr.Endpoint = "0.0.0.0:4317" rcvr, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopSettings(), cfg, mc) require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) From 081464465b3a2afe80b2a91b9e451bc0e216aab8 Mon Sep 17 00:00:00 2001 From: Carson Ip Date: Mon, 1 Jul 2024 08:11:04 +0100 
Subject: [PATCH 6/9] [exporter/elasticsearch] Data stream routing based on `data_stream.*` attributes (#33794) **Description:** Taking over from #33755 - Add data stream routing based on `data_stream.*` attributes - Refine metrics grouping to work with DS routing **Link to tracking Issue:** Closes #33755 Fixes #33756 **Testing:** See unit tests **Documentation:** Updated readme --------- Co-authored-by: Andrzej Stencel Co-authored-by: Andrew Wilkins --- ...r-attribute-based-data-stream-routing.yaml | 29 ++ exporter/elasticsearchexporter/README.md | 48 +-- exporter/elasticsearchexporter/attribute.go | 46 +-- exporter/elasticsearchexporter/config_test.go | 57 +++- .../data_stream_router.go | 82 +++++ exporter/elasticsearchexporter/exporter.go | 95 ++++-- .../elasticsearchexporter/exporter_test.go | 284 +++++++++++++++--- exporter/elasticsearchexporter/factory.go | 17 +- exporter/elasticsearchexporter/go.mod | 2 +- exporter/elasticsearchexporter/model.go | 197 ++++++------ exporter/elasticsearchexporter/model_test.go | 37 ++- exporter/elasticsearchexporter/utils_test.go | 35 +++ 12 files changed, 655 insertions(+), 274 deletions(-) create mode 100644 .chloggen/elasticsearch-exporter-attribute-based-data-stream-routing.yaml create mode 100644 exporter/elasticsearchexporter/data_stream_router.go diff --git a/.chloggen/elasticsearch-exporter-attribute-based-data-stream-routing.yaml b/.chloggen/elasticsearch-exporter-attribute-based-data-stream-routing.yaml new file mode 100644 index 000000000000..33ade28d21ed --- /dev/null +++ b/.chloggen/elasticsearch-exporter-attribute-based-data-stream-routing.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: exporter/elasticsearch + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add data stream routing + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33794, 33756] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + `data_stream.dataset` and `data_stream.namespace` in attributes will be respected when config `*_dynamic_index.enabled` is true. + + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md index 206111c48c20..6c2db78b52da 100644 --- a/exporter/elasticsearchexporter/README.md +++ b/exporter/elasticsearchexporter/README.md @@ -14,7 +14,7 @@ [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -This exporter supports sending OpenTelemetry logs and traces to [Elasticsearch](https://www.elastic.co/elasticsearch). 
+This exporter supports sending logs, metrics and traces to [Elasticsearch](https://www.elastic.co/elasticsearch). ## Configuration options @@ -83,39 +83,43 @@ The Elasticsearch exporter supports the common [`sending_queue` settings][export ### Elasticsearch document routing Telemetry data will be written to signal specific data streams by default: -logs to `logs-generic-default`, and traces to `traces-generic-default`. +logs to `logs-generic-default`, metrics to `metrics-generic-default`, and traces to `traces-generic-default`. This can be customised through the following settings: -- `index` (DEPRECATED, please use `logs_index` for logs, `traces_index` for traces): The [index] or [data stream] name to publish events to. +- `index` (DEPRECATED, please use `logs_index` for logs, `metrics_index` for metrics, `traces_index` for traces): The [index] or [data stream] name to publish events to. The default value is `logs-generic-default`. + - `logs_index`: The [index] or [data stream] name to publish events to. The default value is `logs-generic-default` -- `logs_dynamic_index` (optional): - takes resource or log record attribute named `elasticsearch.index.prefix` and `elasticsearch.index.suffix` - resulting dynamically prefixed / suffixed indexing based on `logs_index`. (priority: resource attribute > log record attribute) - - `enabled`(default=false): Enable/Disable dynamic index for log records -- `metrics_index`: The [index] or [data stream] name to publish metrics to. The default value is `metrics-generic-default`. + +- `logs_dynamic_index` (optional): uses resource, scope, or log record attributes to dynamically construct index name. + - `enabled`(default=false): Enable/Disable dynamic index for log records. If `data_stream.dataset` or `data_stream.namespace` exist in attributes (precedence: log record attribute > scope attribute > resource attribute), they will be used to dynamically construct index name in the form `logs-${data_stream.dataset}-${data_stream.namespace}`. Otherwise, if + `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > log record attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${logs_index}${elasticsearch.index.suffix}`. Otherwise, the index name falls back to `logs-generic-default`, and `logs_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields. + +- `metrics_index` (optional): The [index] or [data stream] name to publish metrics to. The default value is `metrics-generic-default`. ⚠️ Note that metrics support is currently in development. -- `metrics_dynamic_index` (optional): - takes resource attributes named `elasticsearch.index.prefix` and `elasticsearch.index.suffix` - resulting dynamically prefixed / suffixed indexing based on `metrics_index`. + +- `metrics_dynamic_index` (optional): uses resource, scope or data point attributes to dynamically construct index name. ⚠️ Note that metrics support is currently in development. - - `enabled`(default=false): Enable/Disable dynamic index for metrics + - `enabled`(default=true): Enable/disable dynamic index for metrics. If `data_stream.dataset` or `data_stream.namespace` exist in attributes (precedence: data point attribute > scope attribute > resource attribute), they will be used to dynamically construct index name in the form `metrics-${data_stream.dataset}-${data_stream.namespace}`. 
Otherwise, if + `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > data point attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${metrics_index}${elasticsearch.index.suffix}`. Otherwise, the index name falls back to `metrics-generic-default`, and `metrics_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields. + - `traces_index`: The [index] or [data stream] name to publish traces to. The default value is `traces-generic-default`. -- `traces_dynamic_index` (optional): - takes resource or span attribute named `elasticsearch.index.prefix` and `elasticsearch.index.suffix` - resulting dynamically prefixed / suffixed indexing based on `traces_index`. (priority: resource attribute > span attribute) - - `enabled`(default=false): Enable/Disable dynamic index for trace spans -- `logstash_format` (optional): Logstash format compatibility. Traces or Logs data can be written into an index in logstash format. - - `enabled`(default=false): Enable/Disable Logstash format compatibility. When `logstash_format.enabled` is `true`, the index name is composed using `traces/logs_index` or `traces/logs_dynamic_index` as prefix and the date, - e.g: If `traces/logs_index` or `traces/logs_dynamic_index` is equals to `otlp-generic-default` your index will become `otlp-generic-default-YYYY.MM.DD`. - The last string appended belongs to the date when the data is being generated. + +- `traces_dynamic_index` (optional): uses resource, scope, or span attributes to dynamically construct index name. + - `enabled`(default=false): Enable/Disable dynamic index for trace spans. If `data_stream.dataset` or `data_stream.namespace` exist in attributes (precedence: span attribute > scope attribute > resource attribute), they will be used to dynamically construct index name in the form `traces-${data_stream.dataset}-${data_stream.namespace}`. Otherwise, if + `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > span attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${traces_index}${elasticsearch.index.suffix}`. Otherwise, the index name falls back to `traces-generic-default`, and `traces_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields. + +- `logstash_format` (optional): Logstash format compatibility. Logs, metrics and traces can be written into an index in Logstash format. + - `enabled`(default=false): Enable/disable Logstash format compatibility. When `logstash_format.enabled` is `true`, the index name is composed using `(logs|metrics|traces)_index` or `(logs|metrics|traces)_dynamic_index` as prefix and the date as suffix, + e.g: If `logs_index` or `logs_dynamic_index` is equal to `logs-generic-default`, your index will become `logs-generic-default-YYYY.MM.DD`. + The last string appended belongs to the date when the data is being generated. - `prefix_separator`(default=`-`): Set a separator between logstash_prefix and date. - `date_format`(default=`%Y.%m.%d`): Time format (based on strftime) to generate the second part of the Index name. 
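For illustration, a minimal exporter configuration sketch that enables attribute-based routing for all three signals (assuming the exporter's usual `elasticsearch` config key; the endpoint value is a placeholder, all other keys are the settings documented above):

```yaml
exporters:
  elasticsearch:
    endpoints: [https://elastic.example.com:9200]  # placeholder endpoint
    logs_index: logs-generic-default
    logs_dynamic_index:
      enabled: true    # honour data_stream.* / elasticsearch.index.* attributes on log records
    metrics_index: metrics-generic-default
    metrics_dynamic_index:
      enabled: true    # default; data point, scope and resource attributes are consulted
    traces_index: traces-generic-default
    traces_dynamic_index:
      enabled: true    # honour data_stream.* / elasticsearch.index.* attributes on spans
```

With this configuration, a log record carrying hypothetical attributes `data_stream.dataset: nginx.access` and `data_stream.namespace: production` would be routed to `logs-nginx.access-production`; a record without `data_stream.*` attributes falls back to the `elasticsearch.index.prefix`/`elasticsearch.index.suffix` attributes or the static `logs_index`, as described above.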
### Elasticsearch document mapping The Elasticsearch exporter supports several document schemas and preprocessing -behaviours, which may be configured throug the following settings: +behaviours, which may be configured through the following settings: - `mapping`: Events are encoded to JSON. The `mapping` allows users to configure additional mapping rules. @@ -142,7 +146,7 @@ behaviours, which may be configured throug the following settings: In ECS mapping mode, the Elastisearch Exporter attempts to map fields from [OpenTelemetry Semantic Conventions][SemConv] (version 1.22.0) to [Elastic Common Schema][ECS]. -This mode may be used for compatibility with existing dashboards that work with with ECS. +This mode may be used for compatibility with existing dashboards that work with ECS. ### Elasticsearch ingest pipeline diff --git a/exporter/elasticsearchexporter/attribute.go b/exporter/elasticsearchexporter/attribute.go index 987b13f807bb..369e885e22cc 100644 --- a/exporter/elasticsearchexporter/attribute.go +++ b/exporter/elasticsearchexporter/attribute.go @@ -7,43 +7,23 @@ import "go.opentelemetry.io/collector/pdata/pcommon" // dynamic index attribute key constants const ( - indexPrefix = "elasticsearch.index.prefix" - indexSuffix = "elasticsearch.index.suffix" + indexPrefix = "elasticsearch.index.prefix" + indexSuffix = "elasticsearch.index.suffix" + dataStreamDataset = "data_stream.dataset" + dataStreamNamespace = "data_stream.namespace" + dataStreamType = "data_stream.type" + defaultDataStreamDataset = "generic" + defaultDataStreamNamespace = "default" + defaultDataStreamTypeLogs = "logs" + defaultDataStreamTypeMetrics = "metrics" + defaultDataStreamTypeTraces = "traces" ) -// resource is higher priotized than record attribute -type attrGetter interface { - Attributes() pcommon.Map -} - -// retrieve attribute out of resource, scope, and record (span or log, if not found in resource) -// Deprecated: Use getFromAttributesNew instead. 
-func getFromAttributes(name string, resource, scope, record attrGetter) string { - var str string - val, exist := resource.Attributes().Get(name) - if !exist { - val, exist = scope.Attributes().Get(name) - if !exist { - val, exist = record.Attributes().Get(name) - if exist { - str = val.AsString() - } - } - if exist { - str = val.AsString() - } - } - if exist { - str = val.AsString() - } - return str -} - -func getFromAttributesNew(name string, defaultValue string, attributeMaps ...pcommon.Map) string { +func getFromAttributes(name string, defaultValue string, attributeMaps ...pcommon.Map) (string, bool) { for _, attributeMap := range attributeMaps { if value, exists := attributeMap.Get(name); exists { - return value.AsString() + return value.AsString(), true } } - return defaultValue + return defaultValue, false } diff --git a/exporter/elasticsearchexporter/config_test.go b/exporter/elasticsearchexporter/config_test.go index 8ca137118a0a..c409f175497e 100644 --- a/exporter/elasticsearchexporter/config_test.go +++ b/exporter/elasticsearchexporter/config_test.go @@ -57,12 +57,21 @@ func TestConfig(t *testing.T) { NumConsumers: exporterhelper.NewDefaultQueueSettings().NumConsumers, QueueSize: exporterhelper.NewDefaultQueueSettings().QueueSize, }, - Endpoints: []string{"https://elastic.example.com:9200"}, - Index: "", - LogsIndex: "logs-generic-default", + Endpoints: []string{"https://elastic.example.com:9200"}, + Index: "", + LogsIndex: "logs-generic-default", + LogsDynamicIndex: DynamicIndexSetting{ + Enabled: false, + }, MetricsIndex: "metrics-generic-default", - TracesIndex: "trace_index", - Pipeline: "mypipeline", + MetricsDynamicIndex: DynamicIndexSetting{ + Enabled: true, + }, + TracesIndex: "trace_index", + TracesDynamicIndex: DynamicIndexSetting{ + Enabled: false, + }, + Pipeline: "mypipeline", ClientConfig: confighttp.ClientConfig{ Timeout: 2 * time.Minute, MaxIdleConns: &defaultMaxIdleConns, @@ -110,12 +119,21 @@ func TestConfig(t *testing.T) { NumConsumers: exporterhelper.NewDefaultQueueSettings().NumConsumers, QueueSize: exporterhelper.NewDefaultQueueSettings().QueueSize, }, - Endpoints: []string{"http://localhost:9200"}, - Index: "", - LogsIndex: "my_log_index", + Endpoints: []string{"http://localhost:9200"}, + Index: "", + LogsIndex: "my_log_index", + LogsDynamicIndex: DynamicIndexSetting{ + Enabled: false, + }, MetricsIndex: "metrics-generic-default", - TracesIndex: "traces-generic-default", - Pipeline: "mypipeline", + MetricsDynamicIndex: DynamicIndexSetting{ + Enabled: true, + }, + TracesIndex: "traces-generic-default", + TracesDynamicIndex: DynamicIndexSetting{ + Enabled: false, + }, + Pipeline: "mypipeline", ClientConfig: confighttp.ClientConfig{ Timeout: 2 * time.Minute, MaxIdleConns: &defaultMaxIdleConns, @@ -163,12 +181,21 @@ func TestConfig(t *testing.T) { NumConsumers: exporterhelper.NewDefaultQueueSettings().NumConsumers, QueueSize: exporterhelper.NewDefaultQueueSettings().QueueSize, }, - Endpoints: []string{"http://localhost:9200"}, - Index: "", - LogsIndex: "logs-generic-default", + Endpoints: []string{"http://localhost:9200"}, + Index: "", + LogsIndex: "logs-generic-default", + LogsDynamicIndex: DynamicIndexSetting{ + Enabled: false, + }, MetricsIndex: "my_metric_index", - TracesIndex: "traces-generic-default", - Pipeline: "mypipeline", + MetricsDynamicIndex: DynamicIndexSetting{ + Enabled: true, + }, + TracesIndex: "traces-generic-default", + TracesDynamicIndex: DynamicIndexSetting{ + Enabled: false, + }, + Pipeline: "mypipeline", ClientConfig: 
confighttp.ClientConfig{ Timeout: 2 * time.Minute, MaxIdleConns: &defaultMaxIdleConns, diff --git a/exporter/elasticsearchexporter/data_stream_router.go b/exporter/elasticsearchexporter/data_stream_router.go new file mode 100644 index 000000000000..0368f6a1b958 --- /dev/null +++ b/exporter/elasticsearchexporter/data_stream_router.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package elasticsearchexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter" + +import ( + "fmt" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +func routeWithDefaults(defaultDSType, defaultDSDataset, defaultDSNamespace string) func( + pcommon.Map, + pcommon.Map, + pcommon.Map, + string, +) string { + return func( + recordAttr pcommon.Map, + scopeAttr pcommon.Map, + resourceAttr pcommon.Map, + fIndex string, + ) string { + // Order: + // 1. read data_stream.* from attributes + // 2. read elasticsearch.index.* from attributes + // 3. use default hardcoded data_stream.* + dataset, datasetExists := getFromAttributes(dataStreamDataset, defaultDSDataset, recordAttr, scopeAttr, resourceAttr) + namespace, namespaceExists := getFromAttributes(dataStreamNamespace, defaultDSNamespace, recordAttr, scopeAttr, resourceAttr) + dataStreamMode := datasetExists || namespaceExists + if !dataStreamMode { + prefix, prefixExists := getFromAttributes(indexPrefix, "", resourceAttr, scopeAttr, recordAttr) + suffix, suffixExists := getFromAttributes(indexSuffix, "", resourceAttr, scopeAttr, recordAttr) + if prefixExists || suffixExists { + return fmt.Sprintf("%s%s%s", prefix, fIndex, suffix) + } + } + recordAttr.PutStr(dataStreamDataset, dataset) + recordAttr.PutStr(dataStreamNamespace, namespace) + recordAttr.PutStr(dataStreamType, defaultDSType) + return fmt.Sprintf("%s-%s-%s", defaultDSType, dataset, namespace) + } +} + +// routeLogRecord returns the name of the index to send the log record to according to data stream routing attributes and prefix/suffix attributes. +// This function may mutate record attributes. +func routeLogRecord( + record plog.LogRecord, + scope pcommon.InstrumentationScope, + resource pcommon.Resource, + fIndex string, +) string { + route := routeWithDefaults(defaultDataStreamTypeLogs, defaultDataStreamDataset, defaultDataStreamNamespace) + return route(record.Attributes(), scope.Attributes(), resource.Attributes(), fIndex) +} + +// routeDataPoint returns the name of the index to send the data point to according to data stream routing attributes. +// This function may mutate record attributes. +func routeDataPoint( + dataPoint pmetric.NumberDataPoint, + scope pcommon.InstrumentationScope, + resource pcommon.Resource, + fIndex string, +) string { + route := routeWithDefaults(defaultDataStreamTypeMetrics, defaultDataStreamDataset, defaultDataStreamNamespace) + return route(dataPoint.Attributes(), scope.Attributes(), resource.Attributes(), fIndex) +} + +// routeSpan returns the name of the index to send the span to according to data stream routing attributes. +// This function may mutate record attributes. 
+func routeSpan( + span ptrace.Span, + scope pcommon.InstrumentationScope, + resource pcommon.Resource, + fIndex string, +) string { + route := routeWithDefaults(defaultDataStreamTypeTraces, defaultDataStreamDataset, defaultDataStreamNamespace) + return route(span.Attributes(), scope.Attributes(), resource.Attributes(), fIndex) +} diff --git a/exporter/elasticsearchexporter/exporter.go b/exporter/elasticsearchexporter/exporter.go index 01103adfa12f..6cb64da0983d 100644 --- a/exporter/elasticsearchexporter/exporter.go +++ b/exporter/elasticsearchexporter/exporter.go @@ -16,6 +16,8 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/objmodel" ) type elasticsearchExporter struct { @@ -117,10 +119,7 @@ func (e *elasticsearchExporter) pushLogsData(ctx context.Context, ld plog.Logs) func (e *elasticsearchExporter) pushLogRecord(ctx context.Context, resource pcommon.Resource, record plog.LogRecord, scope pcommon.InstrumentationScope) error { fIndex := e.index if e.dynamicIndex { - prefix := getFromAttributes(indexPrefix, resource, scope, record) - suffix := getFromAttributes(indexSuffix, resource, scope, record) - - fIndex = fmt.Sprintf("%s%s%s", prefix, fIndex, suffix) + fIndex = routeLogRecord(record, scope, resource, fIndex) } if e.logstashFormat.Enabled { @@ -149,51 +148,84 @@ func (e *elasticsearchExporter) pushMetricsData( resourceMetric := resourceMetrics.At(i) resource := resourceMetric.Resource() scopeMetrics := resourceMetric.ScopeMetrics() - for j := 0; j < scopeMetrics.Len(); j++ { - scope := scopeMetrics.At(j).Scope() - metricSlice := scopeMetrics.At(j).Metrics() - if err := e.pushMetricSlice(ctx, resource, metricSlice, scope); err != nil { - if ctxErr := ctx.Err(); ctxErr != nil { - return ctxErr + resourceDocs := make(map[string]map[uint32]objmodel.Document) + + for j := 0; j < scopeMetrics.Len(); j++ { + scopeMetrics := scopeMetrics.At(j) + scope := scopeMetrics.Scope() + for k := 0; k < scopeMetrics.Metrics().Len(); k++ { + metric := scopeMetrics.Metrics().At(k) + + // We only support Sum and Gauge metrics at the moment. + var dataPoints pmetric.NumberDataPointSlice + switch metric.Type() { + case pmetric.MetricTypeSum: + dataPoints = metric.Sum().DataPoints() + case pmetric.MetricTypeGauge: + dataPoints = metric.Gauge().DataPoints() } - errs = append(errs, err) + for l := 0; l < dataPoints.Len(); l++ { + dataPoint := dataPoints.At(l) + fIndex, err := e.getMetricDataPointIndex(resource, scope, dataPoint) + if err != nil { + errs = append(errs, err) + continue + } + if _, ok := resourceDocs[fIndex]; !ok { + resourceDocs[fIndex] = make(map[uint32]objmodel.Document) + } + if err := e.model.upsertMetricDataPoint(resourceDocs[fIndex], resource, scope, metric, dataPoint); err != nil { + errs = append(errs, err) + } + } } + } + + for fIndex, docs := range resourceDocs { + for _, doc := range docs { + var ( + docBytes []byte + err error + ) + docBytes, err = e.model.encodeDocument(doc) + if err != nil { + errs = append(errs, err) + continue + } + if err := pushDocuments(ctx, fIndex, docBytes, e.bulkIndexer); err != nil { + if cerr := ctx.Err(); cerr != nil { + return cerr + } + errs = append(errs, err) + } + } } } return errors.Join(errs...) 
} -func (e *elasticsearchExporter) pushMetricSlice( - ctx context.Context, +func (e *elasticsearchExporter) getMetricDataPointIndex( resource pcommon.Resource, - slice pmetric.MetricSlice, scope pcommon.InstrumentationScope, -) error { + dataPoint pmetric.NumberDataPoint, +) (string, error) { fIndex := e.index if e.dynamicIndex { - prefix := getFromAttributesNew(indexPrefix, "", resource.Attributes()) - suffix := getFromAttributesNew(indexSuffix, "", resource.Attributes()) - - fIndex = fmt.Sprintf("%s%s%s", prefix, fIndex, suffix) - } - - documents, err := e.model.encodeMetrics(resource, slice, scope) - if err != nil { - return fmt.Errorf("failed to encode a metric event: %w", err) + fIndex = routeDataPoint(dataPoint, scope, resource, fIndex) } - for _, document := range documents { - err := pushDocuments(ctx, fIndex, document, e.bulkIndexer) + if e.logstashFormat.Enabled { + formattedIndex, err := generateIndexWithLogstashFormat(fIndex, &e.logstashFormat, time.Now()) if err != nil { - return err + return "", err } + fIndex = formattedIndex } - - return nil + return fIndex, nil } func (e *elasticsearchExporter) pushTraceData( @@ -228,10 +260,7 @@ func (e *elasticsearchExporter) pushTraceData( func (e *elasticsearchExporter) pushTraceRecord(ctx context.Context, resource pcommon.Resource, span ptrace.Span, scope pcommon.InstrumentationScope) error { fIndex := e.index if e.dynamicIndex { - prefix := getFromAttributes(indexPrefix, resource, scope, span) - suffix := getFromAttributes(indexSuffix, resource, scope, span) - - fIndex = fmt.Sprintf("%s%s%s", prefix, fIndex, suffix) + fIndex = routeSpan(span, scope, resource, fIndex) } if e.logstashFormat.Enabled { diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go index d2fda9b9aeb9..754cfaa4675f 100644 --- a/exporter/elasticsearchexporter/exporter_test.go +++ b/exporter/elasticsearchexporter/exporter_test.go @@ -10,7 +10,6 @@ import ( "fmt" "net/http" "runtime" - "strings" "sync" "sync/atomic" "testing" @@ -168,7 +167,7 @@ func TestExporterLogs(t *testing.T) { <-done }) - t.Run("publish with dynamic index", func(t *testing.T) { + t.Run("publish with dynamic index, prefix_suffix", func(t *testing.T) { rec := newBulkRecorder() var ( @@ -180,16 +179,8 @@ func TestExporterLogs(t *testing.T) { server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { rec.Record(docs) - data, err := docs[0].Action.MarshalJSON() - assert.NoError(t, err) - - jsonVal := map[string]any{} - err = json.Unmarshal(data, &jsonVal) - assert.NoError(t, err) - - create := jsonVal["create"].(map[string]any) expected := fmt.Sprintf("%s%s%s", prefix, index, suffix) - assert.Equal(t, expected, create["_index"].(string)) + assert.Equal(t, expected, actionJSONToIndex(t, docs[0].Action)) return itemsAllOK(docs) }) @@ -213,20 +204,40 @@ func TestExporterLogs(t *testing.T) { rec.WaitItems(1) }) - t.Run("publish with logstash index format enabled and dynamic index disabled", func(t *testing.T) { + t.Run("publish with dynamic index, data_stream", func(t *testing.T) { rec := newBulkRecorder() server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { rec.Record(docs) - data, err := docs[0].Action.MarshalJSON() - assert.NoError(t, err) + assert.Equal(t, "logs-record.dataset-resource.namespace", actionJSONToIndex(t, docs[0].Action)) - jsonVal := map[string]any{} - err = json.Unmarshal(data, &jsonVal) - assert.NoError(t, err) + return itemsAllOK(docs) + }) - create := 
jsonVal["create"].(map[string]any) - assert.Contains(t, create["_index"], "not-used-index") + exporter := newTestLogsExporter(t, server.URL, func(cfg *Config) { + cfg.LogsDynamicIndex.Enabled = true + }) + logs := newLogsWithAttributeAndResourceMap( + map[string]string{ + dataStreamDataset: "record.dataset", + }, + map[string]string{ + dataStreamDataset: "resource.dataset", + dataStreamNamespace: "resource.namespace", + }, + ) + logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Body().SetStr("hello world") + mustSendLogs(t, exporter, logs) + + rec.WaitItems(1) + }) + + t.Run("publish with logstash index format enabled and dynamic index disabled", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + + assert.Contains(t, actionJSONToIndex(t, docs[0].Action), "not-used-index") return itemsAllOK(docs) }) @@ -250,17 +261,8 @@ func TestExporterLogs(t *testing.T) { server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { rec.Record(docs) - data, err := docs[0].Action.MarshalJSON() - assert.NoError(t, err) - - jsonVal := map[string]any{} - err = json.Unmarshal(data, &jsonVal) - assert.NoError(t, err) - - create := jsonVal["create"].(map[string]any) expected := fmt.Sprintf("%s%s%s", prefix, index, suffix) - - assert.Equal(t, strings.Contains(create["_index"].(string), expected), true) + assert.Contains(t, actionJSONToIndex(t, docs[0].Action), expected) return itemsAllOK(docs) }) @@ -469,6 +471,168 @@ func TestExporterMetrics(t *testing.T) { rec.WaitItems(2) }) + t.Run("publish with dynamic index, prefix_suffix", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + + expected := "resource.prefix-metrics.index-resource.suffix" + assert.Equal(t, expected, actionJSONToIndex(t, docs[0].Action)) + + return itemsAllOK(docs) + }) + + exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) { + cfg.MetricsIndex = "metrics.index" + }) + metrics := newMetricsWithAttributeAndResourceMap( + map[string]string{ + indexSuffix: "-data.point.suffix", + }, + map[string]string{ + indexPrefix: "resource.prefix-", + indexSuffix: "-resource.suffix", + }, + ) + metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetName("my.metric") + mustSendMetrics(t, exporter, metrics) + + rec.WaitItems(1) + }) + + t.Run("publish with dynamic index, data_stream", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + + expected := "metrics-resource.dataset-data.point.namespace" + assert.Equal(t, expected, actionJSONToIndex(t, docs[0].Action)) + + return itemsAllOK(docs) + }) + + exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) { + cfg.MetricsIndex = "metrics.index" + }) + metrics := newMetricsWithAttributeAndResourceMap( + map[string]string{ + dataStreamNamespace: "data.point.namespace", + }, + map[string]string{ + dataStreamDataset: "resource.dataset", + dataStreamNamespace: "resource.namespace", + }, + ) + metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetName("my.metric") + mustSendMetrics(t, exporter, metrics) + + rec.WaitItems(1) + }) + + t.Run("publish with metrics grouping", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + return 
itemsAllOK(docs) + }) + + exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) { + cfg.MetricsIndex = "metrics.index" + cfg.Mapping.Mode = "ecs" + }) + + addToMetricSlice := func(metricSlice pmetric.MetricSlice) { + fooMetric := metricSlice.AppendEmpty() + fooMetric.SetName("metric.foo") + fooDps := fooMetric.SetEmptyGauge().DataPoints() + fooDp := fooDps.AppendEmpty() + fooDp.SetIntValue(1) + fooOtherDp := fooDps.AppendEmpty() + fillResourceAttributeMap(fooOtherDp.Attributes(), map[string]string{ + "dp.attribute": "dp.attribute.value", + }) + fooOtherDp.SetDoubleValue(1.0) + + barMetric := metricSlice.AppendEmpty() + barMetric.SetName("metric.bar") + barDps := barMetric.SetEmptyGauge().DataPoints() + barDp := barDps.AppendEmpty() + barDp.SetDoubleValue(1.0) + barOtherDp := barDps.AppendEmpty() + fillResourceAttributeMap(barOtherDp.Attributes(), map[string]string{ + "dp.attribute": "dp.attribute.value", + }) + barOtherDp.SetDoubleValue(1.0) + barOtherIndexDp := barDps.AppendEmpty() + fillResourceAttributeMap(barOtherIndexDp.Attributes(), map[string]string{ + "dp.attribute": "dp.attribute.value", + dataStreamNamespace: "bar", + }) + barOtherIndexDp.SetDoubleValue(1.0) + + bazMetric := metricSlice.AppendEmpty() + bazMetric.SetName("metric.baz") + bazDps := bazMetric.SetEmptyGauge().DataPoints() + bazDp := bazDps.AppendEmpty() + bazDp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(3600, 0))) + bazDp.SetDoubleValue(1.0) + } + + metrics := pmetric.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + fillResourceAttributeMap(resourceMetrics.Resource().Attributes(), map[string]string{ + dataStreamNamespace: "resource.namespace", + }) + scopeA := resourceMetrics.ScopeMetrics().AppendEmpty() + addToMetricSlice(scopeA.Metrics()) + + scopeB := resourceMetrics.ScopeMetrics().AppendEmpty() + fillResourceAttributeMap(scopeB.Scope().Attributes(), map[string]string{ + dataStreamDataset: "scope.b", + }) + addToMetricSlice(scopeB.Metrics()) + + mustSendMetrics(t, exporter, metrics) + + rec.WaitItems(8) + + expected := []itemRequest{ + { + Action: []byte(`{"create":{"_index":"metrics-generic-bar"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"bar","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1}}`), + }, + { + Action: []byte(`{"create":{"_index":"metrics-generic-resource.namespace"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1,"foo":1}}`), + }, + { + Action: []byte(`{"create":{"_index":"metrics-generic-resource.namespace"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"metric":{"bar":1,"foo":1}}`), + }, + { + Action: []byte(`{"create":{"_index":"metrics-generic-resource.namespace"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"metric":{"baz":1}}`), + }, + { + Action: []byte(`{"create":{"_index":"metrics-scope.b-bar"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"bar","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1}}`), + }, + { + Action: 
[]byte(`{"create":{"_index":"metrics-scope.b-resource.namespace"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1,"foo":1}}`), + }, + { + Action: []byte(`{"create":{"_index":"metrics-scope.b-resource.namespace"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"metric":{"bar":1,"foo":1}}`), + }, + { + Action: []byte(`{"create":{"_index":"metrics-scope.b-resource.namespace"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"metric":{"baz":1}}`), + }, + } + + assertItemsEqual(t, expected, rec.Items(), false) + }) } func TestExporterTraces(t *testing.T) { @@ -486,7 +650,7 @@ func TestExporterTraces(t *testing.T) { rec.WaitItems(2) }) - t.Run("publish with dynamic index", func(t *testing.T) { + t.Run("publish with dynamic index, prefix_suffix", func(t *testing.T) { rec := newBulkRecorder() var ( @@ -531,23 +695,43 @@ func TestExporterTraces(t *testing.T) { rec.WaitItems(1) }) - t.Run("publish with logstash format index", func(t *testing.T) { - var defaultCfg Config + t.Run("publish with dynamic index, data_stream", func(t *testing.T) { rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { rec.Record(docs) - data, err := docs[0].Action.MarshalJSON() - assert.NoError(t, err) + expected := "traces-span.dataset-default" + assert.Equal(t, expected, actionJSONToIndex(t, docs[0].Action)) - jsonVal := map[string]any{} - err = json.Unmarshal(data, &jsonVal) - assert.NoError(t, err) + return itemsAllOK(docs) + }) - create := jsonVal["create"].(map[string]any) + exporter := newTestTracesExporter(t, server.URL, func(cfg *Config) { + cfg.TracesDynamicIndex.Enabled = true + }) - assert.Equal(t, strings.Contains(create["_index"].(string), defaultCfg.TracesIndex), true) + mustSendTraces(t, exporter, newTracesWithAttributeAndResourceMap( + map[string]string{ + dataStreamDataset: "span.dataset", + }, + map[string]string{ + dataStreamDataset: "resource.dataset", + }, + )) + + rec.WaitItems(1) + }) + + t.Run("publish with logstash format index", func(t *testing.T) { + var defaultCfg Config + + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + + assert.Contains(t, actionJSONToIndex(t, docs[0].Action), defaultCfg.TracesIndex) return itemsAllOK(docs) }) @@ -574,17 +758,8 @@ func TestExporterTraces(t *testing.T) { server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { rec.Record(docs) - data, err := docs[0].Action.MarshalJSON() - assert.NoError(t, err) - - jsonVal := map[string]any{} - err = json.Unmarshal(data, &jsonVal) - assert.NoError(t, err) - - create := jsonVal["create"].(map[string]any) expected := fmt.Sprintf("%s%s%s", prefix, index, suffix) - - assert.Equal(t, strings.Contains(create["_index"].(string), expected), true) + assert.Contains(t, actionJSONToIndex(t, docs[0].Action), expected) return itemsAllOK(docs) }) @@ -776,3 +951,14 @@ type roundTripperFunc func(*http.Request) (*http.Response, error) func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) } + +func actionJSONToIndex(t *testing.T, actionJSON json.RawMessage) string { + action := 
struct { + Create struct { + Index string `json:"_index"` + } `json:"create"` + }{} + err := json.Unmarshal(actionJSON, &action) + require.NoError(t, err) + return action.Create.Index +} diff --git a/exporter/elasticsearchexporter/factory.go b/exporter/elasticsearchexporter/factory.go index cc2e27421f39..7826fb59a47e 100644 --- a/exporter/elasticsearchexporter/factory.go +++ b/exporter/elasticsearchexporter/factory.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper" @@ -49,8 +50,17 @@ func createDefaultConfig() component.Config { ClientConfig: httpClientConfig, Index: "", LogsIndex: defaultLogsIndex, - MetricsIndex: defaultMetricsIndex, - TracesIndex: defaultTracesIndex, + LogsDynamicIndex: DynamicIndexSetting{ + Enabled: false, + }, + MetricsIndex: defaultMetricsIndex, + MetricsDynamicIndex: DynamicIndexSetting{ + Enabled: true, + }, + TracesIndex: defaultTracesIndex, + TracesDynamicIndex: DynamicIndexSetting{ + Enabled: false, + }, Retry: RetrySettings{ Enabled: true, MaxRequests: 3, @@ -104,6 +114,7 @@ func createLogsExporter( set, cfg, exporter.pushLogsData, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}), exporterhelper.WithStart(exporter.Start), exporterhelper.WithShutdown(exporter.Shutdown), exporterhelper.WithQueue(cf.QueueSettings), @@ -127,6 +138,7 @@ func createMetricsExporter( set, cfg, exporter.pushMetricsData, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}), exporterhelper.WithStart(exporter.Start), exporterhelper.WithShutdown(exporter.Shutdown), exporterhelper.WithQueue(cf.QueueSettings), @@ -149,6 +161,7 @@ func createTracesExporter(ctx context.Context, set, cfg, exporter.pushTraceData, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}), exporterhelper.WithStart(exporter.Start), exporterhelper.WithShutdown(exporter.Shutdown), exporterhelper.WithQueue(cf.QueueSettings), diff --git a/exporter/elasticsearchexporter/go.mod b/exporter/elasticsearchexporter/go.mod index bbff538ff408..9c49a61022cb 100644 --- a/exporter/elasticsearchexporter/go.mod +++ b/exporter/elasticsearchexporter/go.mod @@ -17,6 +17,7 @@ require ( go.opentelemetry.io/collector/config/confighttp v0.103.0 go.opentelemetry.io/collector/config/configopaque v1.10.0 go.opentelemetry.io/collector/confmap v0.103.0 + go.opentelemetry.io/collector/consumer v0.103.0 go.opentelemetry.io/collector/exporter v0.103.0 go.opentelemetry.io/collector/extension/auth v0.103.0 go.opentelemetry.io/collector/pdata v1.10.0 @@ -70,7 +71,6 @@ require ( go.opentelemetry.io/collector/config/configtelemetry v0.103.0 // indirect go.opentelemetry.io/collector/config/configtls v0.103.0 // indirect go.opentelemetry.io/collector/config/internal v0.103.0 // indirect - go.opentelemetry.io/collector/consumer v0.103.0 // indirect go.opentelemetry.io/collector/extension v0.103.0 // indirect go.opentelemetry.io/collector/featuregate v1.10.0 // indirect go.opentelemetry.io/collector/receiver v0.103.0 // indirect diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index 626b8f566e1f..6df90e214d78 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -64,8 +64,9 @@ var resourceAttrsToPreserve = map[string]bool{ type mappingModel interface { encodeLog(pcommon.Resource, 
plog.LogRecord, pcommon.InstrumentationScope) ([]byte, error) - encodeMetrics(pcommon.Resource, pmetric.MetricSlice, pcommon.InstrumentationScope) ([][]byte, error) encodeSpan(pcommon.Resource, ptrace.Span, pcommon.InstrumentationScope) ([]byte, error) + upsertMetricDataPoint(map[uint32]objmodel.Document, pcommon.Resource, pcommon.InstrumentationScope, pmetric.Metric, pmetric.NumberDataPoint) error + encodeDocument(objmodel.Document) ([]byte, error) } // encodeModel tries to keep the event as close to the original open telemetry semantics as is. @@ -169,123 +170,42 @@ func (m *encodeModel) encodeLogECSMode(resource pcommon.Resource, record plog.Lo return document } -func (m *encodeModel) encodeMetrics(resource pcommon.Resource, metrics pmetric.MetricSlice, _ pcommon.InstrumentationScope) ([][]byte, error) { - var baseDoc objmodel.Document - - baseDoc.AddAttributes("", resource.Attributes()) - - // Put all metrics that have the same attributes and timestamp in one document. - docs := map[uint32]*objmodel.Document{} - for i := 0; i < metrics.Len(); i++ { - metric := metrics.At(i) - - var dps pmetric.NumberDataPointSlice - - // Only Gauge and Sum metric types are supported at the moment. - switch metric.Type() { - case pmetric.MetricTypeGauge: - dps = metric.Gauge().DataPoints() - case pmetric.MetricTypeSum: - dps = metric.Sum().DataPoints() - } - - for j := 0; j < dps.Len(); j++ { - dp := dps.At(j) - - hash := metricHash(dp.Timestamp(), dp.Attributes()) - doc, docExists := docs[hash] - if !docExists { - doc = baseDoc.Clone() - doc.AddTimestamp("@timestamp", dp.Timestamp()) - doc.AddAttributes("", dp.Attributes()) - - docs[hash] = doc - } - - switch dp.ValueType() { - case pmetric.NumberDataPointValueTypeDouble: - doc.AddAttribute(metric.Name(), pcommon.NewValueDouble(dp.DoubleValue())) - case pmetric.NumberDataPointValueTypeInt: - doc.AddAttribute(metric.Name(), pcommon.NewValueInt(dp.IntValue())) - } - } +func (m *encodeModel) encodeDocument(document objmodel.Document) ([]byte, error) { + if m.dedup { + document.Dedup() + } else if m.dedot { + document.Sort() } - res := make([][]byte, 0, len(docs)) - - for _, doc := range docs { - if m.dedup { - doc.Dedup() - } else if m.dedot { - doc.Sort() - } - - var buf bytes.Buffer - err := doc.Serialize(&buf, m.dedot) - if err != nil { - return nil, err - } - - res = append(res, buf.Bytes()) + var buf bytes.Buffer + err := document.Serialize(&buf, m.dedot) + if err != nil { + return nil, err } - - return res, nil -} - -func metricHash(timestamp pcommon.Timestamp, attributes pcommon.Map) uint32 { - hasher := fnv.New32a() - - timestampBuf := make([]byte, 8) - binary.LittleEndian.PutUint64(timestampBuf, uint64(timestamp)) - hasher.Write(timestampBuf) - - mapHash(hasher, attributes) - - return hasher.Sum32() + return buf.Bytes(), nil } -func mapHash(hasher hash.Hash, m pcommon.Map) { - m.Range(func(k string, v pcommon.Value) bool { - hasher.Write([]byte(k)) - valueHash(hasher, v) - - return true - }) -} - -func valueHash(h hash.Hash, v pcommon.Value) { - switch v.Type() { - case pcommon.ValueTypeEmpty: - h.Write([]byte{0}) - case pcommon.ValueTypeStr: - h.Write([]byte(v.Str())) - case pcommon.ValueTypeBool: - if v.Bool() { - h.Write([]byte{1}) - } else { - h.Write([]byte{0}) - } - case pcommon.ValueTypeDouble: - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, math.Float64bits(v.Double())) - h.Write(buf) - case pcommon.ValueTypeInt: - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, uint64(v.Int())) - h.Write(buf) - case 
pcommon.ValueTypeBytes: - h.Write(v.Bytes().AsRaw()) - case pcommon.ValueTypeMap: - mapHash(h, v.Map()) - case pcommon.ValueTypeSlice: - sliceHash(h, v.Slice()) +func (m *encodeModel) upsertMetricDataPoint(documents map[uint32]objmodel.Document, resource pcommon.Resource, _ pcommon.InstrumentationScope, metric pmetric.Metric, dp pmetric.NumberDataPoint) error { + hash := metricHash(dp.Timestamp(), dp.Attributes()) + var ( + document objmodel.Document + ok bool + ) + if document, ok = documents[hash]; !ok { + document.AddAttributes("", resource.Attributes()) + document.AddTimestamp("@timestamp", dp.Timestamp()) + document.AddAttributes("", dp.Attributes()) } -} -func sliceHash(h hash.Hash, s pcommon.Slice) { - for i := 0; i < s.Len(); i++ { - valueHash(h, s.At(i)) + switch dp.ValueType() { + case pmetric.NumberDataPointValueTypeDouble: + document.AddAttribute(metric.Name(), pcommon.NewValueDouble(dp.DoubleValue())) + case pmetric.NumberDataPointValueTypeInt: + document.AddAttribute(metric.Name(), pcommon.NewValueInt(dp.IntValue())) } + + documents[hash] = document + return nil } func (m *encodeModel) encodeSpan(resource pcommon.Resource, span ptrace.Span, scope pcommon.InstrumentationScope) ([]byte, error) { @@ -479,3 +399,60 @@ func encodeLogTimestampECSMode(document *objmodel.Document, record plog.LogRecor document.AddTimestamp("@timestamp", record.ObservedTimestamp()) } + +// TODO use https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/exp/metrics/identity +func metricHash(timestamp pcommon.Timestamp, attributes pcommon.Map) uint32 { + hasher := fnv.New32a() + + timestampBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(timestampBuf, uint64(timestamp)) + hasher.Write(timestampBuf) + + mapHash(hasher, attributes) + + return hasher.Sum32() +} + +func mapHash(hasher hash.Hash, m pcommon.Map) { + m.Range(func(k string, v pcommon.Value) bool { + hasher.Write([]byte(k)) + valueHash(hasher, v) + + return true + }) +} + +func valueHash(h hash.Hash, v pcommon.Value) { + switch v.Type() { + case pcommon.ValueTypeEmpty: + h.Write([]byte{0}) + case pcommon.ValueTypeStr: + h.Write([]byte(v.Str())) + case pcommon.ValueTypeBool: + if v.Bool() { + h.Write([]byte{1}) + } else { + h.Write([]byte{0}) + } + case pcommon.ValueTypeDouble: + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, math.Float64bits(v.Double())) + h.Write(buf) + case pcommon.ValueTypeInt: + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, uint64(v.Int())) + h.Write(buf) + case pcommon.ValueTypeBytes: + h.Write(v.Bytes().AsRaw()) + case pcommon.ValueTypeMap: + mapHash(h, v.Map()) + case pcommon.ValueTypeSlice: + sliceHash(h, v.Slice()) + } +} + +func sliceHash(h hash.Hash, s pcommon.Slice) { + for i := 0; i < s.Len(); i++ { + valueHash(h, s.At(i)) + } +} diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go index 26199ba8cca9..675f78af83f7 100644 --- a/exporter/elasticsearchexporter/model_test.go +++ b/exporter/elasticsearchexporter/model_test.go @@ -90,21 +90,40 @@ func TestEncodeMetric(t *testing.T) { model := &encodeModel{ dedot: true, dedup: true, - mode: MappingNone, + mode: MappingECS, + } + + docs := make(map[uint32]objmodel.Document) + + var docsBytes [][]byte + for i := 0; i < metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().Len(); i++ { + err := model.upsertMetricDataPoint(docs, + metrics.ResourceMetrics().At(0).Resource(), + 
metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope(), + metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0), + metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(i)) + require.NoError(t, err) + } + + for _, doc := range docs { + bytes, err := model.encodeDocument(doc) + require.NoError(t, err) + docsBytes = append(docsBytes, bytes) } - docsBytes, err := model.encodeMetrics(metrics.ResourceMetrics().At(0).Resource(), metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics(), metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope()) + allDocsSorted := docBytesToSortedString(docsBytes) + assert.Equal(t, expectedMetricsEncoded, allDocsSorted) +} + +func docBytesToSortedString(docsBytes [][]byte) string { // Convert the byte arrays to strings and sort the docs to make the test deterministic. - require.NoError(t, err) - docs := make([]string, 0, len(docsBytes)) - for _, docBytes := range docsBytes { - docs = append(docs, string(docBytes)) + docs := make([]string, len(docsBytes)) + for i, docBytes := range docsBytes { + docs[i] = string(docBytes) } sort.Strings(docs) allDocsSorted := strings.Join(docs, "\n") - - // Test that the result matches the expected value. - assert.Equal(t, expectedMetricsEncoded, allDocsSorted) + return allDocsSorted } func createTestMetrics(t *testing.T) pmetric.Metrics { diff --git a/exporter/elasticsearchexporter/utils_test.go b/exporter/elasticsearchexporter/utils_test.go index de3d60418b24..f57f16272c24 100644 --- a/exporter/elasticsearchexporter/utils_test.go +++ b/exporter/elasticsearchexporter/utils_test.go @@ -10,6 +10,7 @@ import ( "fmt" "net/http" "net/http/httptest" + "slices" "strings" "sync" "testing" @@ -18,6 +19,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -26,6 +28,29 @@ type itemRequest struct { Document json.RawMessage } +func itemRequestsSortFunc(a, b itemRequest) int { + comp := bytes.Compare(a.Action, b.Action) + if comp == 0 { + return bytes.Compare(a.Document, b.Document) + } + return comp +} + +func assertItemsEqual(t *testing.T, expected, actual []itemRequest, assertOrder bool) { + expectedItems := expected + actualItems := actual + if !assertOrder { + // Make copies to avoid mutating the args + expectedItems = make([]itemRequest, len(expected)) + copy(expectedItems, expected) + slices.SortFunc(expectedItems, itemRequestsSortFunc) + actualItems = make([]itemRequest, len(actual)) + copy(actualItems, actual) + slices.SortFunc(actualItems, itemRequestsSortFunc) + } + assert.Equal(t, expectedItems, actualItems) +} + type itemResponse struct { Status int `json:"status"` } @@ -236,6 +261,16 @@ func newLogsWithAttributeAndResourceMap(attrMp map[string]string, resMp map[stri return logs } +func newMetricsWithAttributeAndResourceMap(attrMp map[string]string, resMp map[string]string) pmetric.Metrics { + metrics := pmetric.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + + fillResourceAttributeMap(resourceMetrics.Resource().Attributes(), resMp) + fillResourceAttributeMap(resourceMetrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetEmptySum().DataPoints().AppendEmpty().Attributes(), attrMp) + + return metrics +} + func newTracesWithAttributeAndResourceMap(attrMp map[string]string, resMp map[string]string) ptrace.Traces { traces := ptrace.NewTraces() 
resourceSpans := traces.ResourceSpans() From 5260327a1cda25fc37319db730b67a4aa0c28edf Mon Sep 17 00:00:00 2001 From: Martin Majlis <122797378+martin-majlis-s1@users.noreply.github.com> Date: Mon, 1 Jul 2024 11:22:45 +0200 Subject: [PATCH 7/9] [datasetexporter] Upgrade to dataset-go v0.20.0 (#33812) **Description:** Upgrade to dataset-go v0.20.0 In the PR #31293 we have introduced releasing unused resources. However there was a bug that was occasional SIGSEVs. This was fixed in https://github.com/scalyr/dataset-go/pull/100. Therefore this PR is: * upgrading to `dataset-go` v0.20.0 - https://github.com/scalyr/dataset-go/releases/tag/v0.20.0 - which contains the fix * introducing configuration option `buffer.max_parallel_outgoing` to control the maximum number of outgoing connections. **Link to tracking Issue:** #33812 **Testing:** Unit tests and stress tests **Documentation:** Screenshot 2024-06-27 at 12 04 24 --- .../datasetexporter-update-to-0.20.0.yaml | 29 +++++++++++++++++++ cmd/otelcontribcol/go.mod | 3 +- cmd/otelcontribcol/go.sum | 6 ++-- exporter/datasetexporter/README.md | 1 + exporter/datasetexporter/config.go | 4 +++ exporter/datasetexporter/config_test.go | 2 +- exporter/datasetexporter/factory_test.go | 2 ++ exporter/datasetexporter/go.mod | 3 +- exporter/datasetexporter/go.sum | 6 ++-- .../logs_exporter_stress_test.go | 1 + .../datasetexporter/logs_exporter_test.go | 1 + exporter/datasetexporter/testdata/config.yaml | 1 + 12 files changed, 46 insertions(+), 13 deletions(-) create mode 100644 .chloggen/datasetexporter-update-to-0.20.0.yaml diff --git a/.chloggen/datasetexporter-update-to-0.20.0.yaml b/.chloggen/datasetexporter-update-to-0.20.0.yaml new file mode 100644 index 000000000000..8fd3f2a3a6be --- /dev/null +++ b/.chloggen/datasetexporter-update-to-0.20.0.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'bug_fix' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: datasetexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Upgrade dataset-go to v0.20.0 + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33812] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + Upgrade `dataset-go` library from v0.19.0 to v0.20.0. + Make number of outgoing connections configurable. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 8d4f659ac027..45992c2a411b 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -429,7 +429,6 @@ require ( github.com/containerd/ttrpc v1.2.2 // indirect github.com/coreos/go-oidc/v3 v3.10.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cskr/pubsub v1.0.2 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -693,7 +692,7 @@ require ( github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/samber/lo v1.38.1 // indirect github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 // indirect - github.com/scalyr/dataset-go v0.19.0 // indirect + github.com/scalyr/dataset-go v0.20.0 // indirect github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect github.com/segmentio/asm v1.2.0 // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index fb26b551853c..97856cae6d95 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -1170,8 +1170,6 @@ github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHf github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= -github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U= @@ -2109,8 +2107,8 @@ github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= -github.com/scalyr/dataset-go v0.19.0 h1:rlpvUnyKM9PD+b+sXqJVwktkQ+vkecKSVjSs+Z3IioI= -github.com/scalyr/dataset-go v0.19.0/go.mod h1:k/A+KhdEyffuTGb1n+jSWg8J5ikV9iYVKejVGjGXXoA= +github.com/scalyr/dataset-go v0.20.0 h1:VZJew4W/ufnb67yZSpWQKI40U4XpA+w+cDM+mk0qS2E= +github.com/scalyr/dataset-go v0.20.0/go.mod h1:k/A+KhdEyffuTGb1n+jSWg8J5ikV9iYVKejVGjGXXoA= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds= diff --git a/exporter/datasetexporter/README.md b/exporter/datasetexporter/README.md index 05720c9645df..7472ff9ef12c 100644 --- a/exporter/datasetexporter/README.md +++ b/exporter/datasetexporter/README.md @@ -54,6 +54,7 @@ Make sure to provide the appropriate server host value in the `serverHost` attri - `retry_max_interval` (default = 30s): Is the upper bound on 
backoff. - `retry_max_elapsed_time` (default = 300s): Is the maximum amount of time spent trying to send a buffer. - `retry_shutdown_timeout` (default = 30s): The maximum time for which it will try to send data to the DataSet during shutdown. This value should be shorter than container's grace period. + - `max_parallel_outgoing` (default = 100): The maximum number of parallel outgoing requests. - `logs`: - `export_resource_info_on_event` (default = false): Include LogRecord resource information (if available) on the DataSet event. - `export_resource_prefix` (default = 'resource.attributes.'): A prefix string for the resource, if `export_resource_info_on_event` is enabled. diff --git a/exporter/datasetexporter/config.go b/exporter/datasetexporter/config.go index 3f6fe531708f..138478425ff3 100644 --- a/exporter/datasetexporter/config.go +++ b/exporter/datasetexporter/config.go @@ -111,6 +111,7 @@ const bufferRetryInitialInterval = 5 * time.Second const bufferRetryMaxInterval = 30 * time.Second const bufferRetryMaxElapsedTime = 300 * time.Second const bufferRetryShutdownTimeout = 30 * time.Second +const bufferMaxParallelOutgoing = 100 type BufferSettings struct { MaxLifetime time.Duration `mapstructure:"max_lifetime"` @@ -120,6 +121,7 @@ type BufferSettings struct { RetryMaxInterval time.Duration `mapstructure:"retry_max_interval"` RetryMaxElapsedTime time.Duration `mapstructure:"retry_max_elapsed_time"` RetryShutdownTimeout time.Duration `mapstructure:"retry_shutdown_timeout"` + MaxParallelOutgoing int `mapstructure:"max_parallel_outgoing"` } // newDefaultBufferSettings returns the default settings for BufferSettings. @@ -132,6 +134,7 @@ func newDefaultBufferSettings() BufferSettings { RetryMaxInterval: bufferRetryMaxInterval, RetryMaxElapsedTime: bufferRetryMaxElapsedTime, RetryShutdownTimeout: bufferRetryShutdownTimeout, + MaxParallelOutgoing: bufferMaxParallelOutgoing, } } @@ -218,6 +221,7 @@ func (c *Config) convert() *ExporterConfig { RetryMultiplier: backoff.DefaultMultiplier, RetryRandomizationFactor: backoff.DefaultRandomizationFactor, RetryShutdownTimeout: c.BufferSettings.RetryShutdownTimeout, + MaxParallelOutgoing: c.BufferSettings.MaxParallelOutgoing, }, ServerHostSettings: server_host_config.DataSetServerHostSettings{ UseHostName: c.ServerHostSettings.UseHostName, diff --git a/exporter/datasetexporter/config_test.go b/exporter/datasetexporter/config_test.go index 156eaac34d2f..55682b3cf20b 100644 --- a/exporter/datasetexporter/config_test.go +++ b/exporter/datasetexporter/config_test.go @@ -143,7 +143,7 @@ func TestConfigString(t *testing.T) { } assert.Equal(t, - "DatasetURL: https://example.com; APIKey: [REDACTED] (6); Debug: true; BufferSettings: {MaxLifetime:123ns PurgeOlderThan:567ns GroupBy:[field1 field2] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s RetryShutdownTimeout:0s}; LogsSettings: {ExportResourceInfo:true ExportResourcePrefix:AAA ExportScopeInfo:true ExportScopePrefix:BBB DecomposeComplexMessageField:true DecomposedComplexMessagePrefix:EEE exportSettings:{ExportSeparator:CCC ExportDistinguishingSuffix:DDD}}; TracesSettings: {exportSettings:{ExportSeparator:TTT ExportDistinguishingSuffix:UUU}}; ServerHostSettings: {UseHostName:false ServerHost:foo-bar}; BackOffConfig: {Enabled:true InitialInterval:5s RandomizationFactor:0.5 Multiplier:1.5 MaxInterval:30s MaxElapsedTime:5m0s}; QueueSettings: {Enabled:true NumConsumers:10 QueueSize:1000 StorageID:}; TimeoutSettings: {Timeout:5s}", + "DatasetURL: https://example.com; APIKey: [REDACTED] (6); Debug: 
true; BufferSettings: {MaxLifetime:123ns PurgeOlderThan:567ns GroupBy:[field1 field2] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s RetryShutdownTimeout:0s MaxParallelOutgoing:0}; LogsSettings: {ExportResourceInfo:true ExportResourcePrefix:AAA ExportScopeInfo:true ExportScopePrefix:BBB DecomposeComplexMessageField:true DecomposedComplexMessagePrefix:EEE exportSettings:{ExportSeparator:CCC ExportDistinguishingSuffix:DDD}}; TracesSettings: {exportSettings:{ExportSeparator:TTT ExportDistinguishingSuffix:UUU}}; ServerHostSettings: {UseHostName:false ServerHost:foo-bar}; BackOffConfig: {Enabled:true InitialInterval:5s RandomizationFactor:0.5 Multiplier:1.5 MaxInterval:30s MaxElapsedTime:5m0s}; QueueSettings: {Enabled:true NumConsumers:10 QueueSize:1000 StorageID:}; TimeoutSettings: {Timeout:5s}", config.String(), ) } diff --git a/exporter/datasetexporter/factory_test.go b/exporter/datasetexporter/factory_test.go index a2693366d3d1..61ed2fb819b2 100644 --- a/exporter/datasetexporter/factory_test.go +++ b/exporter/datasetexporter/factory_test.go @@ -72,6 +72,7 @@ func TestLoadConfig(t *testing.T) { RetryMaxInterval: bufferRetryMaxInterval, RetryMaxElapsedTime: bufferRetryMaxElapsedTime, RetryShutdownTimeout: bufferRetryShutdownTimeout, + MaxParallelOutgoing: bufferMaxParallelOutgoing, }, TracesSettings: newDefaultTracesSettings(), LogsSettings: newDefaultLogsSettings(), @@ -95,6 +96,7 @@ func TestLoadConfig(t *testing.T) { RetryMaxInterval: 22 * time.Second, RetryMaxElapsedTime: 23 * time.Second, RetryShutdownTimeout: 24 * time.Second, + MaxParallelOutgoing: 25, }, TracesSettings: TracesSettings{ exportSettings: exportSettings{ diff --git a/exporter/datasetexporter/go.mod b/exporter/datasetexporter/go.mod index 9f7aa5e028d3..34f5704c2cf7 100644 --- a/exporter/datasetexporter/go.mod +++ b/exporter/datasetexporter/go.mod @@ -6,7 +6,7 @@ require ( github.com/google/uuid v1.6.0 // github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.103.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.103.0 - github.com/scalyr/dataset-go v0.19.0 + github.com/scalyr/dataset-go v0.20.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.103.0 go.opentelemetry.io/collector/confmap v0.103.0 @@ -27,7 +27,6 @@ require ( require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cskr/pubsub v1.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect diff --git a/exporter/datasetexporter/go.sum b/exporter/datasetexporter/go.sum index 4a4f3738ca0e..ceee2d306eca 100644 --- a/exporter/datasetexporter/go.sum +++ b/exporter/datasetexporter/go.sum @@ -4,8 +4,6 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= -github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -60,8 +58,8 @@ github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/scalyr/dataset-go v0.19.0 h1:rlpvUnyKM9PD+b+sXqJVwktkQ+vkecKSVjSs+Z3IioI= -github.com/scalyr/dataset-go v0.19.0/go.mod h1:k/A+KhdEyffuTGb1n+jSWg8J5ikV9iYVKejVGjGXXoA= +github.com/scalyr/dataset-go v0.20.0 h1:VZJew4W/ufnb67yZSpWQKI40U4XpA+w+cDM+mk0qS2E= +github.com/scalyr/dataset-go v0.20.0/go.mod h1:k/A+KhdEyffuTGb1n+jSWg8J5ikV9iYVKejVGjGXXoA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= diff --git a/exporter/datasetexporter/logs_exporter_stress_test.go b/exporter/datasetexporter/logs_exporter_stress_test.go index 3ef6222fd8f1..5f1a37f693a5 100644 --- a/exporter/datasetexporter/logs_exporter_stress_test.go +++ b/exporter/datasetexporter/logs_exporter_stress_test.go @@ -85,6 +85,7 @@ func TestConsumeLogsManyLogsShouldSucceed(t *testing.T) { RetryMaxElapsedTime: 50 * maxDelay, RetryShutdownTimeout: time.Minute, PurgeOlderThan: 100 * maxDelay, + MaxParallelOutgoing: bufferMaxParallelOutgoing, }, BackOffConfig: configretry.NewDefaultBackOffConfig(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 7eb223da94fd..f350f070eeae 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -795,6 +795,7 @@ func TestConsumeLogsShouldSucceed(t *testing.T) { RetryMaxInterval: time.Minute, RetryMaxElapsedTime: time.Hour, RetryShutdownTimeout: time.Minute, + MaxParallelOutgoing: 100, }, LogsSettings: LogsSettings{ ExportResourceInfo: true, diff --git a/exporter/datasetexporter/testdata/config.yaml b/exporter/datasetexporter/testdata/config.yaml index 1338aec01753..5d9dd5173574 100644 --- a/exporter/datasetexporter/testdata/config.yaml +++ b/exporter/datasetexporter/testdata/config.yaml @@ -26,6 +26,7 @@ dataset/full: retry_max_interval: 22s retry_max_elapsed_time: 23s retry_shutdown_timeout: 24s + max_parallel_outgoing: 25 logs: export_resource_info_on_event: true export_resource_prefix: "_resource_" From 74244e46a6b6ef502695ca02b9a7ed13927a7951 Mon Sep 17 00:00:00 2001 From: Carson Ip Date: Mon, 1 Jul 2024 13:13:17 +0100 Subject: [PATCH 8/9] [exporter/elasticsearch] Encode metrics resource attributes in ECS mapping mode (#33823) **Description:** Encode metrics resource attributes in ECS mapping mode **Link to tracking Issue:** **Testing:** Unit tests **Documentation:** --- ...exporter-metrics-ecs-attr-translation.yaml | 27 +++++++++++++++ exporter/elasticsearchexporter/model.go | 10 +++--- exporter/elasticsearchexporter/model_test.go | 34 +++++++++---------- 3 files changed, 49 insertions(+), 22 deletions(-) create mode 100644 .chloggen/elasticsearch-exporter-metrics-ecs-attr-translation.yaml diff --git a/.chloggen/elasticsearch-exporter-metrics-ecs-attr-translation.yaml b/.chloggen/elasticsearch-exporter-metrics-ecs-attr-translation.yaml new file mode 100644 index 
000000000000..716edaf178ab --- /dev/null +++ b/.chloggen/elasticsearch-exporter-metrics-ecs-attr-translation.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: exporter/elasticsearch + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Encode metrics resource attributes in ECS mapping mode + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33823] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index 6df90e214d78..ccf76b5afdf6 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -132,13 +132,13 @@ func (m *encodeModel) encodeLogECSMode(resource pcommon.Resource, record plog.Lo var document objmodel.Document // First, try to map resource-level attributes to ECS fields. - encodeLogAttributesECSMode(&document, resource.Attributes(), resourceAttrsConversionMap, resourceAttrsToPreserve) + encodeAttributesECSMode(&document, resource.Attributes(), resourceAttrsConversionMap, resourceAttrsToPreserve) // Then, try to map scope-level attributes to ECS fields. scopeAttrsConversionMap := map[string]string{ // None at the moment } - encodeLogAttributesECSMode(&document, scope.Attributes(), scopeAttrsConversionMap, resourceAttrsToPreserve) + encodeAttributesECSMode(&document, scope.Attributes(), scopeAttrsConversionMap, resourceAttrsToPreserve) // Finally, try to map record-level attributes to ECS fields. recordAttrsConversionMap := map[string]string{ @@ -148,7 +148,7 @@ func (m *encodeModel) encodeLogECSMode(resource pcommon.Resource, record plog.Lo semconv.AttributeExceptionType: "error.type", semconv.AttributeExceptionEscaped: "event.error.exception.handled", } - encodeLogAttributesECSMode(&document, record.Attributes(), recordAttrsConversionMap, resourceAttrsToPreserve) + encodeAttributesECSMode(&document, record.Attributes(), recordAttrsConversionMap, resourceAttrsToPreserve) // Handle special cases. 
encodeLogAgentNameECSMode(&document, resource) @@ -192,7 +192,7 @@ func (m *encodeModel) upsertMetricDataPoint(documents map[uint32]objmodel.Docume ok bool ) if document, ok = documents[hash]; !ok { - document.AddAttributes("", resource.Attributes()) + encodeAttributesECSMode(&document, resource.Attributes(), resourceAttrsConversionMap, resourceAttrsToPreserve) document.AddTimestamp("@timestamp", dp.Timestamp()) document.AddAttributes("", dp.Attributes()) } @@ -283,7 +283,7 @@ func scopeToAttributes(scope pcommon.InstrumentationScope) pcommon.Map { return attrs } -func encodeLogAttributesECSMode(document *objmodel.Document, attrs pcommon.Map, conversionMap map[string]string, preserveMap map[string]bool) { +func encodeAttributesECSMode(document *objmodel.Document, attrs pcommon.Map, conversionMap map[string]string, preserveMap map[string]bool) { if len(conversionMap) == 0 { // No conversions to be done; add all attributes at top level of // document. diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go index 675f78af83f7..d3784e5081f1 100644 --- a/exporter/elasticsearchexporter/model_test.go +++ b/exporter/elasticsearchexporter/model_test.go @@ -26,22 +26,22 @@ var expectedSpanBody = `{"@timestamp":"2023-04-19T03:04:05.000000006Z","Attribut var expectedLogBody = `{"@timestamp":"2023-04-19T03:04:05.000000006Z","Attributes.log-attr1":"value1","Body":"log-body","Resource.key1":"value1","Scope.name":"","Scope.version":"","SeverityNumber":0,"TraceFlags":0}` -var expectedMetricsEncoded = `{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"name":"my-host"},"os":{"type":"linux"},"state":"idle","system":{"cpu":{"time":440.23}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"name":"my-host"},"os":{"type":"linux"},"state":"interrupt","system":{"cpu":{"time":0}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"name":"my-host"},"os":{"type":"linux"},"state":"nice","system":{"cpu":{"time":0.14}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"name":"my-host"},"os":{"type":"linux"},"state":"softirq","system":{"cpu":{"time":0.77}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"name":"my-host"},"os":{"type":"linux"},"state":"steal","system":{"cpu":{"time":0}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"name":"my-host"},"os":{"type":"linux"},"state":"system","system":{"cpu":{"time":24.8}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"name":"my-host"},"os":{"type":"linux"},"state":"user","system":{"cpu":{"time":64.78}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"name":"my-host"},"os":{"type":"linux"},"state":"wait","system":{"cpu":{"time":1.65}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"name":"my-host"},"os":{"type":"linux"},"state":"idle","system":{"cpu":{"time":475.69}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"name":"my-host"},"os":{"type":"linux"},"state":"interrupt","system":{"cpu":{"time":0}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"name":"my-host"},"os":{"type":"linux"},"state":"nice","system":{"cpu":{"time":0.1}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"name":"my-host"},"os":{"type":"linux"},"state":"softirq","system":{"cpu":{"time":0.57}}} 
-{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"name":"my-host"},"os":{"type":"linux"},"state":"steal","system":{"cpu":{"time":0}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"name":"my-host"},"os":{"type":"linux"},"state":"system","system":{"cpu":{"time":15.88}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"name":"my-host"},"os":{"type":"linux"},"state":"user","system":{"cpu":{"time":50.09}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"name":"my-host"},"os":{"type":"linux"},"state":"wait","system":{"cpu":{"time":0.95}}}` +var expectedMetricsEncoded = `{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"idle","system":{"cpu":{"time":440.23}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"interrupt","system":{"cpu":{"time":0}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"nice","system":{"cpu":{"time":0.14}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"softirq","system":{"cpu":{"time":0.77}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"steal","system":{"cpu":{"time":0}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"system","system":{"cpu":{"time":24.8}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"user","system":{"cpu":{"time":64.78}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"wait","system":{"cpu":{"time":1.65}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"idle","system":{"cpu":{"time":475.69}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"interrupt","system":{"cpu":{"time":0}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"nice","system":{"cpu":{"time":0.1}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"softirq","system":{"cpu":{"time":0.57}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"steal","system":{"cpu":{"time":0}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"system","system":{"cpu":{"time":15.88}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"user","system":{"cpu":{"time":50.09}}} 
+{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"wait","system":{"cpu":{"time":0.95}}}` var expectedLogBodyWithEmptyTimestamp = `{"@timestamp":"1970-01-01T00:00:00.000000000Z","Attributes.log-attr1":"value1","Body":"log-body","Resource.key1":"value1","Scope.name":"","Scope.version":"","SeverityNumber":0,"TraceFlags":0}` var expectedLogBodyDeDottedWithEmptyTimestamp = `{"@timestamp":"1970-01-01T00:00:00.000000000Z","Attributes":{"log-attr1":"value1"},"Body":"log-body","Resource":{"foo":{"bar":"baz"},"key1":"value1"},"Scope":{"name":"","version":""},"SeverityNumber":0,"TraceFlags":0}` @@ -877,7 +877,7 @@ func TestMapLogAttributesToECS(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { var doc objmodel.Document - encodeLogAttributesECSMode(&doc, test.attrs(), test.conversionMap, test.preserveMap) + encodeAttributesECSMode(&doc, test.attrs(), test.conversionMap, test.preserveMap) doc.Sort() expectedDoc := test.expectedDoc() From 88e18f79020087c3101faa5eee0fe4bc089b3466 Mon Sep 17 00:00:00 2001 From: Stefan Kurek Date: Mon, 1 Jul 2024 08:48:29 -0400 Subject: [PATCH 9/9] [receiver/vcenter] Fixes multiple datacenters issue (#33735) --- ...nterreceiver_fix_multiple_datacenters.yaml | 27 +++++++++ receiver/vcenterreceiver/client.go | 55 ++++++++++++++----- receiver/vcenterreceiver/client_test.go | 36 ++++++++++-- receiver/vcenterreceiver/scraper.go | 37 ++++++++++--- 4 files changed, 129 insertions(+), 26 deletions(-) create mode 100644 .chloggen/vcenterreceiver_fix_multiple_datacenters.yaml diff --git a/.chloggen/vcenterreceiver_fix_multiple_datacenters.yaml b/.chloggen/vcenterreceiver_fix_multiple_datacenters.yaml new file mode 100644 index 000000000000..f4cd6b9e28c4 --- /dev/null +++ b/.chloggen/vcenterreceiver_fix_multiple_datacenters.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: vcenterreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fixes errors in some of the client calls for environments containing multiple datacenters. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33734] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/receiver/vcenterreceiver/client.go b/receiver/vcenterreceiver/client.go index 6bf61883dd9b..105a5ba29676 100644 --- a/receiver/vcenterreceiver/client.go +++ b/receiver/vcenterreceiver/client.go @@ -224,29 +224,56 @@ func (vc *vcenterClient) VMs(ctx context.Context, containerMoRef vt.ManagedObjec return vms, nil } -// ResourcePoolInventoryListObjects returns the ResourcePools (with populated InventoryLists) of the vSphere SDK -func (vc *vcenterClient) ResourcePoolInventoryListObjects(ctx context.Context) ([]*object.ResourcePool, error) { - rps, err := vc.finder.ResourcePoolList(ctx, "*") +// DatacenterInventoryListObjects returns the Datacenters (with populated InventoryLists) of the vSphere SDK +func (vc *vcenterClient) DatacenterInventoryListObjects(ctx context.Context) ([]*object.Datacenter, error) { + dcs, err := vc.finder.DatacenterList(ctx, "*") if err != nil { - return nil, fmt.Errorf("unable to retrieve ResourcePools with InventoryLists: %w", err) + return nil, fmt.Errorf("unable to retrieve Datacenters with InventoryLists: %w", err) } - return rps, nil + return dcs, nil } -// VAppInventoryListObjects returns the vApps (with populated InventoryLists) of the vSphere SDK -func (vc *vcenterClient) VAppInventoryListObjects(ctx context.Context) ([]*object.VirtualApp, error) { - vApps, err := vc.finder.VirtualAppList(ctx, "*") - if err == nil { - return vApps, nil +// ResourcePoolInventoryListObjects returns the ResourcePools (with populated InventoryLists) of the vSphere SDK +func (vc *vcenterClient) ResourcePoolInventoryListObjects( + ctx context.Context, + dcs []*object.Datacenter, +) ([]*object.ResourcePool, error) { + allRPools := []*object.ResourcePool{} + for _, dc := range dcs { + vc.finder.SetDatacenter(dc) + rps, err := vc.finder.ResourcePoolList(ctx, "*") + var notFoundErr *find.NotFoundError + if err != nil && !errors.As(err, ¬FoundErr) { + return nil, fmt.Errorf("unable to retrieve ResourcePools with InventoryLists for datacenter %s: %w", dc.InventoryPath, err) + } + allRPools = append(allRPools, rps...) } - var notFoundErr *find.NotFoundError - if errors.As(err, ¬FoundErr) { - return []*object.VirtualApp{}, nil + return allRPools, nil +} + +// VAppInventoryListObjects returns the vApps (with populated InventoryLists) of the vSphere SDK +func (vc *vcenterClient) VAppInventoryListObjects( + ctx context.Context, + dcs []*object.Datacenter, +) ([]*object.VirtualApp, error) { + allVApps := []*object.VirtualApp{} + for _, dc := range dcs { + vc.finder.SetDatacenter(dc) + vApps, err := vc.finder.VirtualAppList(ctx, "*") + if err == nil { + allVApps = append(allVApps, vApps...) 
+ continue + } + + var notFoundErr *find.NotFoundError + if !errors.As(err, ¬FoundErr) { + return nil, fmt.Errorf("unable to retrieve vApps with InventoryLists for datacenter %s: %w", dc.InventoryPath, err) + } } - return nil, fmt.Errorf("unable to retrieve vApps with InventoryLists: %w", err) + return allVApps, nil } // PerfMetricsQueryResult contains performance metric related data diff --git a/receiver/vcenterreceiver/client_test.go b/receiver/vcenterreceiver/client_test.go index 1576b4f2da3a..7cb30b7e60f1 100644 --- a/receiver/vcenterreceiver/client_test.go +++ b/receiver/vcenterreceiver/client_test.go @@ -219,17 +219,36 @@ func TestPerfMetricsQuery(t *testing.T) { }, esx) } +func TestDatacenterInventoryListObjects(t *testing.T) { + vpx := simulator.VPX() + vpx.Datacenter = 2 + simulator.Test(func(ctx context.Context, c *vim25.Client) { + finder := find.NewFinder(c) + client := vcenterClient{ + vimDriver: c, + finder: finder, + } + dcs, err := client.DatacenterInventoryListObjects(ctx) + require.NoError(t, err) + require.Equal(t, len(dcs), 2) + }, vpx) +} + func TestResourcePoolInventoryListObjects(t *testing.T) { + vpx := simulator.VPX() + vpx.Datacenter = 2 simulator.Test(func(ctx context.Context, c *vim25.Client) { finder := find.NewFinder(c) client := vcenterClient{ vimDriver: c, finder: finder, } - rps, err := client.ResourcePoolInventoryListObjects(ctx) + dcs, err := finder.DatacenterList(ctx, "*") + require.NoError(t, err) + rps, err := client.ResourcePoolInventoryListObjects(ctx, dcs) require.NoError(t, err) require.NotEmpty(t, rps, 0) - }) + }, vpx) } func TestVAppInventoryListObjects(t *testing.T) { @@ -237,6 +256,7 @@ func TestVAppInventoryListObjects(t *testing.T) { // vApps appears to be broken t.Skip() vpx := simulator.VPX() + vpx.Datacenter = 2 vpx.App = 2 simulator.Test(func(ctx context.Context, c *vim25.Client) { finder := find.NewFinder(c) @@ -244,23 +264,29 @@ func TestVAppInventoryListObjects(t *testing.T) { vimDriver: c, finder: finder, } - vApps, err := client.VAppInventoryListObjects(ctx) + dcs, err := finder.DatacenterList(ctx, "*") + require.NoError(t, err) + vApps, err := client.VAppInventoryListObjects(ctx, dcs) require.NoError(t, err) require.NotEmpty(t, vApps, 0) }, vpx) } func TestEmptyVAppInventoryListObjects(t *testing.T) { + vpx := simulator.VPX() + vpx.Datacenter = 2 simulator.Test(func(ctx context.Context, c *vim25.Client) { finder := find.NewFinder(c) client := vcenterClient{ vimDriver: c, finder: finder, } - vApps, err := client.VAppInventoryListObjects(ctx) + dcs, err := finder.DatacenterList(ctx, "*") + require.NoError(t, err) + vApps, err := client.VAppInventoryListObjects(ctx, dcs) require.NoError(t, err) require.Empty(t, vApps, 0) - }) + }, vpx) } func TestSessionReestablish(t *testing.T) { diff --git a/receiver/vcenterreceiver/scraper.go b/receiver/vcenterreceiver/scraper.go index a99ac707d44a..a1a358b038fa 100644 --- a/receiver/vcenterreceiver/scraper.go +++ b/receiver/vcenterreceiver/scraper.go @@ -5,6 +5,7 @@ import ( "context" "fmt" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/performance" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" @@ -108,8 +109,9 @@ func (v *vcenterMetricScraper) scrape(ctx context.Context) (pmetric.Metrics, err // scrapeAndProcessAllMetrics collects & converts all relevant resources managed by vCenter to OTEL resources & metrics func (v *vcenterMetricScraper) scrapeAndProcessAllMetrics(ctx context.Context, errs *scrapererror.ScrapeErrors) error { v.scrapeData = 
newVcenterScrapeData() - v.scrapeResourcePoolInventoryListObjects(ctx, errs) - v.scrapeVAppInventoryListObjects(ctx, errs) + dcObjects := v.scrapeDatacenterInventoryListObjects(ctx, errs) + v.scrapeResourcePoolInventoryListObjects(ctx, dcObjects, errs) + v.scrapeVAppInventoryListObjects(ctx, dcObjects, errs) v.scrapeDatacenters(ctx, errs) for _, dc := range v.scrapeData.datacenters { @@ -128,13 +130,30 @@ func (v *vcenterMetricScraper) scrapeAndProcessAllMetrics(ctx context.Context, e return errs.Combine() } -// scrapeResourcePoolInventoryListObjects scrapes and store all ResourcePool objects with their InventoryLists -func (v *vcenterMetricScraper) scrapeResourcePoolInventoryListObjects(ctx context.Context, errs *scrapererror.ScrapeErrors) { +// scrapeDatacenterInventoryListObjects scrapes and stores all Datacenter objects with their InventoryLists +func (v *vcenterMetricScraper) scrapeDatacenterInventoryListObjects( + ctx context.Context, + errs *scrapererror.ScrapeErrors, +) []*object.Datacenter { + // Get Datacenters with InventoryLists and store for later retrieval + dcs, err := v.client.DatacenterInventoryListObjects(ctx) + if err != nil { + errs.AddPartial(1, err) + } + return dcs +} + +// scrapeResourcePoolInventoryListObjects scrapes and stores all ResourcePool objects with their InventoryLists +func (v *vcenterMetricScraper) scrapeResourcePoolInventoryListObjects( + ctx context.Context, + dcs []*object.Datacenter, + errs *scrapererror.ScrapeErrors, +) { // Init for current collection v.scrapeData.rPoolIPathsByRef = make(map[string]*string) // Get ResourcePools with InventoryLists and store for later retrieval - rPools, err := v.client.ResourcePoolInventoryListObjects(ctx) + rPools, err := v.client.ResourcePoolInventoryListObjects(ctx, dcs) if err != nil { errs.AddPartial(1, err) return @@ -145,12 +164,16 @@ func (v *vcenterMetricScraper) scrapeResourcePoolInventoryListObjects(ctx contex } // scrapeVAppInventoryListObjects scrapes and stores all vApp objects with their InventoryLists -func (v *vcenterMetricScraper) scrapeVAppInventoryListObjects(ctx context.Context, errs *scrapererror.ScrapeErrors) { +func (v *vcenterMetricScraper) scrapeVAppInventoryListObjects( + ctx context.Context, + dcs []*object.Datacenter, + errs *scrapererror.ScrapeErrors, +) { // Init for current collection v.scrapeData.vAppIPathsByRef = make(map[string]*string) // Get vApps with InventoryLists and store for later retrieval - vApps, err := v.client.VAppInventoryListObjects(ctx) + vApps, err := v.client.VAppInventoryListObjects(ctx, dcs) if err != nil { errs.AddPartial(1, err) return
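
For reference, the per-datacenter lookup pattern that client.go adopts above can be sketched in isolation. The snippet below is a minimal, hedged illustration rather than the receiver's actual code: it assumes an already-authenticated `vim25.Client`, and the helper name `listResourcePoolsPerDatacenter` is invented for this sketch. It shows the same govmomi calls the patch relies on (`DatacenterList`, `SetDatacenter`, `ResourcePoolList`, and tolerating `find.NotFoundError`), which is what makes inventory listing work when more than one datacenter exists.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25"
)

// listResourcePoolsPerDatacenter is a hypothetical helper mirroring the approach
// in client.go: enumerate datacenters first, then scope the finder to each one
// before listing, so multi-datacenter environments no longer error out.
func listResourcePoolsPerDatacenter(ctx context.Context, c *vim25.Client) ([]*object.ResourcePool, error) {
	finder := find.NewFinder(c)

	// Listing datacenters at the root is unambiguous even with several datacenters.
	dcs, err := finder.DatacenterList(ctx, "*")
	if err != nil {
		return nil, fmt.Errorf("listing datacenters: %w", err)
	}

	all := []*object.ResourcePool{}
	for _, dc := range dcs {
		// Scope subsequent inventory lookups to this datacenter.
		finder.SetDatacenter(dc)

		rps, err := finder.ResourcePoolList(ctx, "*")
		// A datacenter with no resource pools reports find.NotFoundError; treat
		// that as an empty result rather than a failure, as the patch does.
		var notFound *find.NotFoundError
		if err != nil && !errors.As(err, &notFound) {
			return nil, fmt.Errorf("listing resource pools in %s: %w", dc.InventoryPath, err)
		}
		all = append(all, rps...)
	}
	return all, nil
}
```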