diff --git a/.run/all.run.xml b/.run/all.run.xml
index 5b5035b88..8ed34eb80 100644
--- a/.run/all.run.xml
+++ b/.run/all.run.xml
@@ -2,7 +2,7 @@
-
+
@@ -10,4 +10,4 @@
-
+
\ No newline at end of file
diff --git a/cmd/windows_exporter/main.go b/cmd/windows_exporter/main.go
index 63a47d10c..d9537a42b 100644
--- a/cmd/windows_exporter/main.go
+++ b/cmd/windows_exporter/main.go
@@ -33,6 +33,7 @@ import (
"os/signal"
"os/user"
"runtime"
+ "runtime/debug"
"slices"
"strings"
"time"
@@ -66,6 +67,7 @@ func main() {
func run() int {
startTime := time.Now()
+ ctx := context.Background()
app := kingpin.New("windows_exporter", "A metrics collector for Windows.")
@@ -103,6 +105,10 @@ func run() int {
"process.priority",
"Priority of the exporter process. Higher priorities may improve exporter responsiveness during periods of system load. Can be one of [\"realtime\", \"high\", \"abovenormal\", \"normal\", \"belownormal\", \"low\"]",
).Default("normal").String()
+ memoryLimit = app.Flag(
+ "process.memory-limit",
+ "Limit memory usage in bytes. This is a soft-limit and not guaranteed. 0 means no limit. Read more at https://pkg.go.dev/runtime/debug#SetMemoryLimit .",
+ ).Default("200000000").Int64()
)
logFile := &log.AllowedFile{}
@@ -132,6 +138,8 @@ func run() int {
return 1
}
+ debug.SetMemoryLimit(*memoryLimit)
+
logger, err := log.New(logConfig)
if err != nil {
//nolint:sloglint // we do not have an logger yet
@@ -143,7 +151,7 @@ func run() int {
}
if *configFile != "" {
- resolver, err := config.NewResolver(*configFile, logger, *insecureSkipVerify)
+ resolver, err := config.NewResolver(ctx, *configFile, logger, *insecureSkipVerify)
if err != nil {
logger.Error("could not load config file",
slog.Any("err", err),
@@ -153,7 +161,7 @@ func run() int {
}
if err = resolver.Bind(app, os.Args[1:]); err != nil {
- logger.Error("Failed to bind configuration",
+ logger.ErrorContext(ctx, "failed to bind configuration",
slog.Any("err", err),
)
@@ -167,7 +175,7 @@ func run() int {
// Parse flags once more to include those discovered in configuration file(s).
if _, err = app.Parse(os.Args[1:]); err != nil {
- logger.Error("Failed to parse CLI args from YAML file",
+ logger.ErrorContext(ctx, "failed to parse CLI args from YAML file",
slog.Any("err", err),
)
@@ -185,7 +193,7 @@ func run() int {
}
}
- logger.Debug("Logging has Started")
+ logger.LogAttrs(ctx, slog.LevelDebug, "logging has started")
if err = setPriorityWindows(logger, os.Getpid(), *processPriority); err != nil {
logger.Error("failed to set process priority",
@@ -217,7 +225,7 @@ func run() int {
logCurrentUser(logger)
- logger.Info("Enabled collectors: " + strings.Join(enabledCollectorList, ", "))
+ logger.InfoContext(ctx, "Enabled collectors: "+strings.Join(enabledCollectorList, ", "))
mux := http.NewServeMux()
mux.Handle("GET /health", httphandler.NewHealthHandler())
@@ -235,7 +243,7 @@ func run() int {
mux.HandleFunc("GET /debug/pprof/trace", pprof.Trace)
}
- logger.Info(fmt.Sprintf("starting windows_exporter in %s", time.Since(startTime)),
+ logger.LogAttrs(ctx, slog.LevelInfo, fmt.Sprintf("starting windows_exporter in %s", time.Since(startTime)),
slog.String("version", version.Version),
slog.String("branch", version.Branch),
slog.String("revision", version.GetRevision()),
@@ -262,7 +270,7 @@ func run() int {
close(errCh)
}()
- ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
+ ctx, stop := signal.NotifyContext(ctx, os.Interrupt, os.Kill)
defer stop()
select {
@@ -272,7 +280,7 @@ func run() int {
logger.Info("Shutting down windows_exporter via service control")
case err := <-errCh:
if err != nil {
- logger.Error("Failed to start windows_exporter",
+ logger.ErrorContext(ctx, "Failed to start windows_exporter",
slog.Any("err", err),
)
@@ -285,7 +293,7 @@ func run() int {
_ = server.Shutdown(ctx)
- logger.Info("windows_exporter has shut down")
+ logger.InfoContext(ctx, "windows_exporter has shut down")
return 0
}
@@ -326,7 +334,7 @@ func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
return nil
}
- logger.Debug("setting process priority to " + priority)
+ logger.LogAttrs(context.Background(), slog.LevelDebug, "setting process priority to "+priority)
// https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
handle, err := windows.OpenProcess(
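The main.go changes above pair a soft memory limit (runtime/debug.SetMemoryLimit, fed by the new --process.memory-limit flag) with a single root context that the signal handler and the context-aware slog calls share. A minimal, self-contained sketch of that pattern (illustrative only, with an example limit value; not code from this patch):

package main

import (
	"context"
	"log/slog"
	"os"
	"os/signal"
	"runtime/debug"
)

func main() {
	// Soft limit of ~200 MB, matching the flag's default of 200000000 bytes.
	// The runtime treats this as a target, not a hard cap; the initial limit
	// is math.MaxInt64, i.e. effectively unlimited.
	debug.SetMemoryLimit(200_000_000)

	// One root context, cancelled on interrupt, reused for context-aware
	// logging, mirroring how run() threads ctx into signal.NotifyContext and
	// the slog *Context calls.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	slog.InfoContext(ctx, "running with a soft memory limit")
	<-ctx.Done()
	slog.InfoContext(ctx, "shutting down")
}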
diff --git a/internal/collector/ad/ad.go b/internal/collector/ad/ad.go
index b7edf0cf1..f02b4982c 100644
--- a/internal/collector/ad/ad.go
+++ b/internal/collector/ad/ad.go
@@ -21,7 +21,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -36,7 +36,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
addressBookClientSessions *prometheus.Desc
addressBookOperationsTotal *prometheus.Desc
@@ -129,156 +130,9 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
- counters := []string{
- abANRPerSec,
- abBrowsesPerSec,
- abClientSessions,
- abMatchesPerSec,
- abPropertyReadsPerSec,
- abProxyLookupsPerSec,
- abSearchesPerSec,
- approximateHighestDNT,
- atqEstimatedQueueDelay,
- atqOutstandingQueuedRequests,
- atqRequestLatency,
- atqThreadsLDAP,
- atqThreadsOther,
- atqThreadsTotal,
- baseSearchesPerSec,
- databaseAddsPerSec,
- databaseDeletesPerSec,
- databaseModifiesPerSec,
- databaseRecyclesPerSec,
- digestBindsPerSec,
- draHighestUSNCommittedHighPart,
- draHighestUSNCommittedLowPart,
- draHighestUSNIssuedHighPart,
- draHighestUSNIssuedLowPart,
- draInboundBytesCompressedBetweenSitesAfterCompressionSinceBoot,
- draInboundBytesCompressedBetweenSitesAfterCompressionPerSec,
- draInboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot,
- draInboundBytesCompressedBetweenSitesBeforeCompressionPerSec,
- draInboundBytesNotCompressedWithinSiteSinceBoot,
- draInboundBytesNotCompressedWithinSitePerSec,
- draInboundBytesTotalSinceBoot,
- draInboundBytesTotalPerSec,
- draInboundFullSyncObjectsRemaining,
- draInboundLinkValueUpdatesRemainingInPacket,
- draInboundObjectUpdatesRemainingInPacket,
- draInboundObjectsAppliedPerSec,
- draInboundObjectsFilteredPerSec,
- draInboundObjectsPerSec,
- draInboundPropertiesAppliedPerSec,
- draInboundPropertiesFilteredPerSec,
- draInboundPropertiesTotalPerSec,
- draInboundTotalUpdatesRemainingInPacket,
- draInboundValuesDNsOnlyPerSec,
- draInboundValuesTotalPerSec,
- draOutboundBytesCompressedBetweenSitesAfterCompressionSinceBoot,
- draOutboundBytesCompressedBetweenSitesAfterCompressionPerSec,
- draOutboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot,
- draOutboundBytesCompressedBetweenSitesBeforeCompressionPerSec,
- draOutboundBytesNotCompressedWithinSiteSinceBoot,
- draOutboundBytesNotCompressedWithinSitePerSec,
- draOutboundBytesTotalSinceBoot,
- draOutboundBytesTotalPerSec,
- draOutboundObjectsFilteredPerSec,
- draOutboundObjectsPerSec,
- draOutboundPropertiesPerSec,
- draOutboundValuesDNsOnlyPerSec,
- draOutboundValuesTotalPerSec,
- draPendingReplicationOperations,
- draPendingReplicationSynchronizations,
- draSyncFailuresOnSchemaMismatch,
- draSyncRequestsMade,
- draSyncRequestsSuccessful,
- draThreadsGettingNCChanges,
- draThreadsGettingNCChangesHoldingSemaphore,
- dsPercentReadsFromDRA,
- dsPercentReadsFromKCC,
- dsPercentReadsFromLSA,
- dsPercentReadsFromNSPI,
- dsPercentReadsFromNTDSAPI,
- dsPercentReadsFromSAM,
- dsPercentReadsOther,
- dsPercentSearchesFromDRA,
- dsPercentSearchesFromKCC,
- dsPercentSearchesFromLDAP,
- dsPercentSearchesFromLSA,
- dsPercentSearchesFromNSPI,
- dsPercentSearchesFromNTDSAPI,
- dsPercentSearchesFromSAM,
- dsPercentSearchesOther,
- dsPercentWritesFromDRA,
- dsPercentWritesFromKCC,
- dsPercentWritesFromLDAP,
- dsPercentWritesFromLSA,
- dsPercentWritesFromNSPI,
- dsPercentWritesFromNTDSAPI,
- dsPercentWritesFromSAM,
- dsPercentWritesOther,
- dsClientBindsPerSec,
- dsClientNameTranslationsPerSec,
- dsDirectoryReadsPerSec,
- dsDirectorySearchesPerSec,
- dsDirectoryWritesPerSec,
- dsMonitorListSize,
- dsNameCacheHitRate,
- dsNotifyQueueSize,
- dsSearchSubOperationsPerSec,
- dsSecurityDescriptorPropagationsEvents,
- dsSecurityDescriptorPropagatorAverageExclusionTime,
- dsSecurityDescriptorPropagatorRuntimeQueue,
- dsSecurityDescriptorSubOperationsPerSec,
- dsServerBindsPerSec,
- dsServerNameTranslationsPerSec,
- dsThreadsInUse,
- externalBindsPerSec,
- fastBindsPerSec,
- ldapActiveThreads,
- ldapBindTime,
- ldapClientSessions,
- ldapClosedConnectionsPerSec,
- ldapNewConnectionsPerSec,
- ldapNewSSLConnectionsPerSec,
- ldapSearchesPerSec,
- ldapSuccessfulBindsPerSec,
- ldapUDPOperationsPerSec,
- ldapWritesPerSec,
- linkValuesCleanedPerSec,
- negotiatedBindsPerSec,
- ntlmBindsPerSec,
- oneLevelSearchesPerSec,
- phantomsCleanedPerSec,
- phantomsVisitedPerSec,
- samAccountGroupEvaluationLatency,
- samDisplayInformationQueriesPerSec,
- samDomainLocalGroupMembershipEvaluationsPerSec,
- samEnumerationsPerSec,
- samGCEvaluationsPerSec,
- samGlobalGroupMembershipEvaluationsPerSec,
- samMachineCreationAttemptsPerSec,
- samMembershipChangesPerSec,
- samNonTransitiveMembershipEvaluationsPerSec,
- samPasswordChangesPerSec,
- samResourceGroupEvaluationLatency,
- samSuccessfulComputerCreationsPerSecIncludesAllRequests,
- samSuccessfulUserCreationsPerSec,
- samTransitiveMembershipEvaluationsPerSec,
- samUniversalGroupMembershipEvaluationsPerSec,
- samUserCreationAttemptsPerSec,
- simpleBindsPerSec,
- subtreeSearchesPerSec,
- tombstonesGarbageCollectedPerSec,
- tombstonesVisitedPerSec,
- transitiveOperationsMillisecondsRun,
- transitiveOperationsPerSec,
- transitiveSubOperationsPerSec,
- }
-
var err error
- c.perfDataCollector, err = perfdata.NewCollector("DirectoryServices", perfdata.InstancesAll, counters)
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("DirectoryServices", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
}
@@ -663,210 +517,204 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", err)
}
- data, ok := perfData["NTDS"]
-
- if !ok {
- return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", types.ErrNoData)
- }
-
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
- data[abANRPerSec].FirstValue,
+ c.perfDataObject[0].AbANRPerSec,
"ambiguous_name_resolution",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
- data[abBrowsesPerSec].FirstValue,
+ c.perfDataObject[0].AbBrowsesPerSec,
"browse",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
- data[abMatchesPerSec].FirstValue,
+ c.perfDataObject[0].AbMatchesPerSec,
"find",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
- data[abPropertyReadsPerSec].FirstValue,
+ c.perfDataObject[0].AbPropertyReadsPerSec,
"property_read",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
- data[abSearchesPerSec].FirstValue,
+ c.perfDataObject[0].AbSearchesPerSec,
"search",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
- data[abProxyLookupsPerSec].FirstValue,
+ c.perfDataObject[0].AbProxyLookupsPerSec,
"proxy_search",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookClientSessions,
prometheus.GaugeValue,
- data[abClientSessions].FirstValue,
+ c.perfDataObject[0].AbClientSessions,
)
ch <- prometheus.MustNewConstMetric(
c.approximateHighestDistinguishedNameTag,
prometheus.GaugeValue,
- data[approximateHighestDNT].FirstValue,
+ c.perfDataObject[0].ApproximateHighestDNT,
)
ch <- prometheus.MustNewConstMetric(
c.atqEstimatedDelaySeconds,
prometheus.GaugeValue,
- data[atqEstimatedQueueDelay].FirstValue/1000,
+ c.perfDataObject[0].AtqEstimatedQueueDelay/1000,
)
ch <- prometheus.MustNewConstMetric(
c.atqOutstandingRequests,
prometheus.GaugeValue,
- data[atqOutstandingQueuedRequests].FirstValue,
+ c.perfDataObject[0].AtqOutstandingQueuedRequests,
)
ch <- prometheus.MustNewConstMetric(
c.atqAverageRequestLatency,
prometheus.GaugeValue,
- data[atqRequestLatency].FirstValue,
+ c.perfDataObject[0].AtqRequestLatency,
)
ch <- prometheus.MustNewConstMetric(
c.atqCurrentThreads,
prometheus.GaugeValue,
- data[atqThreadsLDAP].FirstValue,
+ c.perfDataObject[0].AtqThreadsLDAP,
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.atqCurrentThreads,
prometheus.GaugeValue,
- data[atqThreadsOther].FirstValue,
+ c.perfDataObject[0].AtqThreadsOther,
"other",
)
ch <- prometheus.MustNewConstMetric(
c.searchesTotal,
prometheus.CounterValue,
- data[baseSearchesPerSec].FirstValue,
+ c.perfDataObject[0].BaseSearchesPerSec,
"base",
)
ch <- prometheus.MustNewConstMetric(
c.searchesTotal,
prometheus.CounterValue,
- data[subtreeSearchesPerSec].FirstValue,
+ c.perfDataObject[0].SubtreeSearchesPerSec,
"subtree",
)
ch <- prometheus.MustNewConstMetric(
c.searchesTotal,
prometheus.CounterValue,
- data[oneLevelSearchesPerSec].FirstValue,
+ c.perfDataObject[0].OneLevelSearchesPerSec,
"one_level",
)
ch <- prometheus.MustNewConstMetric(
c.databaseOperationsTotal,
prometheus.CounterValue,
- data[databaseAddsPerSec].FirstValue,
+ c.perfDataObject[0].DatabaseAddsPerSec,
"add",
)
ch <- prometheus.MustNewConstMetric(
c.databaseOperationsTotal,
prometheus.CounterValue,
- data[databaseDeletesPerSec].FirstValue,
+ c.perfDataObject[0].DatabaseDeletesPerSec,
"delete",
)
ch <- prometheus.MustNewConstMetric(
c.databaseOperationsTotal,
prometheus.CounterValue,
- data[databaseModifiesPerSec].FirstValue,
+ c.perfDataObject[0].DatabaseModifiesPerSec,
"modify",
)
ch <- prometheus.MustNewConstMetric(
c.databaseOperationsTotal,
prometheus.CounterValue,
- data[databaseRecyclesPerSec].FirstValue,
+ c.perfDataObject[0].DatabaseRecyclesPerSec,
"recycle",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[digestBindsPerSec].FirstValue,
+ c.perfDataObject[0].DigestBindsPerSec,
"digest",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[dsClientBindsPerSec].FirstValue,
+ c.perfDataObject[0].DsClientBindsPerSec,
"ds_client",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[dsServerBindsPerSec].FirstValue,
+ c.perfDataObject[0].DsServerBindsPerSec,
"ds_server",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[externalBindsPerSec].FirstValue,
+ c.perfDataObject[0].ExternalBindsPerSec,
"external",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[fastBindsPerSec].FirstValue,
+ c.perfDataObject[0].FastBindsPerSec,
"fast",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[negotiatedBindsPerSec].FirstValue,
+ c.perfDataObject[0].NegotiatedBindsPerSec,
"negotiate",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[ntlmBindsPerSec].FirstValue,
+ c.perfDataObject[0].NTLMBindsPerSec,
"ntlm",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[simpleBindsPerSec].FirstValue,
+ c.perfDataObject[0].SimpleBindsPerSec,
"simple",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
- data[ldapSuccessfulBindsPerSec].FirstValue,
+ c.perfDataObject[0].LdapSuccessfulBindsPerSec,
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.replicationHighestUsn,
prometheus.CounterValue,
- float64(uint64(data[draHighestUSNCommittedHighPart].FirstValue)<<32)+data[draHighestUSNCommittedLowPart].FirstValue,
+ float64(uint64(c.perfDataObject[0].DRAHighestUSNCommittedHighPart)<<32)+c.perfDataObject[0].DRAHighestUSNCommittedLowPart,
"committed",
)
ch <- prometheus.MustNewConstMetric(
c.replicationHighestUsn,
prometheus.CounterValue,
- float64(uint64(data[draHighestUSNIssuedHighPart].FirstValue)<<32)+data[draHighestUSNIssuedLowPart].FirstValue,
+ float64(uint64(c.perfDataObject[0].DRAHighestUSNIssuedHighPart)<<32)+c.perfDataObject[0].DRAHighestUSNIssuedLowPart,
"issued",
)
ch <- prometheus.MustNewConstMetric(
c.interSiteReplicationDataBytesTotal,
prometheus.CounterValue,
- data[draInboundBytesCompressedBetweenSitesAfterCompressionPerSec].FirstValue,
+ c.perfDataObject[0].DRAInboundBytesCompressedBetweenSitesAfterCompressionPerSec,
"inbound",
)
// The pre-compression perfData size seems to have little value? Skipping for now
@@ -879,7 +727,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.interSiteReplicationDataBytesTotal,
prometheus.CounterValue,
- data[draOutboundBytesCompressedBetweenSitesAfterCompressionPerSec].FirstValue,
+ c.perfDataObject[0].DRAOutboundBytesCompressedBetweenSitesAfterCompressionPerSec,
"outbound",
)
// ch <- prometheus.MustNewConstMetric(
@@ -891,270 +739,270 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.intraSiteReplicationDataBytesTotal,
prometheus.CounterValue,
- data[draInboundBytesNotCompressedWithinSitePerSec].FirstValue,
+ c.perfDataObject[0].DRAInboundBytesNotCompressedWithinSitePerSec,
"inbound",
)
ch <- prometheus.MustNewConstMetric(
c.intraSiteReplicationDataBytesTotal,
prometheus.CounterValue,
- data[draOutboundBytesNotCompressedWithinSitePerSec].FirstValue,
+ c.perfDataObject[0].DRAOutboundBytesNotCompressedWithinSitePerSec,
"outbound",
)
ch <- prometheus.MustNewConstMetric(
c.replicationInboundSyncObjectsRemaining,
prometheus.GaugeValue,
- data[draInboundFullSyncObjectsRemaining].FirstValue,
+ c.perfDataObject[0].DRAInboundFullSyncObjectsRemaining,
)
ch <- prometheus.MustNewConstMetric(
c.replicationInboundLinkValueUpdatesRemaining,
prometheus.GaugeValue,
- data[draInboundLinkValueUpdatesRemainingInPacket].FirstValue,
+ c.perfDataObject[0].DRAInboundLinkValueUpdatesRemainingInPacket,
)
ch <- prometheus.MustNewConstMetric(
c.replicationInboundObjectsUpdatedTotal,
prometheus.CounterValue,
- data[draInboundObjectsAppliedPerSec].FirstValue,
+ c.perfDataObject[0].DRAInboundObjectsAppliedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.replicationInboundObjectsFilteredTotal,
prometheus.CounterValue,
- data[draInboundObjectsFilteredPerSec].FirstValue,
+ c.perfDataObject[0].DRAInboundObjectsFilteredPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.replicationInboundPropertiesUpdatedTotal,
prometheus.CounterValue,
- data[draInboundPropertiesAppliedPerSec].FirstValue,
+ c.perfDataObject[0].DRAInboundPropertiesAppliedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.replicationInboundPropertiesFilteredTotal,
prometheus.CounterValue,
- data[draInboundPropertiesFilteredPerSec].FirstValue,
+ c.perfDataObject[0].DRAInboundPropertiesFilteredPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.replicationPendingOperations,
prometheus.GaugeValue,
- data[draPendingReplicationOperations].FirstValue,
+ c.perfDataObject[0].DRAPendingReplicationOperations,
)
ch <- prometheus.MustNewConstMetric(
c.replicationPendingSynchronizations,
prometheus.GaugeValue,
- data[draPendingReplicationSynchronizations].FirstValue,
+ c.perfDataObject[0].DRAPendingReplicationSynchronizations,
)
ch <- prometheus.MustNewConstMetric(
c.replicationSyncRequestsTotal,
prometheus.CounterValue,
- data[draSyncRequestsMade].FirstValue,
+ c.perfDataObject[0].DRASyncRequestsMade,
)
ch <- prometheus.MustNewConstMetric(
c.replicationSyncRequestsSuccessTotal,
prometheus.CounterValue,
- data[draSyncRequestsSuccessful].FirstValue,
+ c.perfDataObject[0].DRASyncRequestsSuccessful,
)
ch <- prometheus.MustNewConstMetric(
c.replicationSyncRequestsSchemaMismatchFailureTotal,
prometheus.CounterValue,
- data[draSyncFailuresOnSchemaMismatch].FirstValue,
+ c.perfDataObject[0].DRASyncFailuresOnSchemaMismatch,
)
ch <- prometheus.MustNewConstMetric(
c.nameTranslationsTotal,
prometheus.CounterValue,
- data[dsClientNameTranslationsPerSec].FirstValue,
+ c.perfDataObject[0].DsClientNameTranslationsPerSec,
"client",
)
ch <- prometheus.MustNewConstMetric(
c.nameTranslationsTotal,
prometheus.CounterValue,
- data[dsServerNameTranslationsPerSec].FirstValue,
+ c.perfDataObject[0].DsServerNameTranslationsPerSec,
"server",
)
ch <- prometheus.MustNewConstMetric(
c.changeMonitorsRegistered,
prometheus.GaugeValue,
- data[dsMonitorListSize].FirstValue,
+ c.perfDataObject[0].DsMonitorListSize,
)
ch <- prometheus.MustNewConstMetric(
c.changeMonitorUpdatesPending,
prometheus.GaugeValue,
- data[dsNotifyQueueSize].FirstValue,
+ c.perfDataObject[0].DsNotifyQueueSize,
)
ch <- prometheus.MustNewConstMetric(
c.nameCacheHitsTotal,
prometheus.CounterValue,
- data[dsNameCacheHitRate].FirstValue,
+ c.perfDataObject[0].DsNameCacheHitRate,
)
ch <- prometheus.MustNewConstMetric(
c.nameCacheLookupsTotal,
prometheus.CounterValue,
- data[dsNameCacheHitRate].SecondValue,
+ c.perfDataObject[0].DsNameCacheHitRateSecondValue,
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentReadsFromDRA].FirstValue,
+ c.perfDataObject[0].DsPercentReadsFromDRA,
"read",
"replication_agent",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentReadsFromKCC].FirstValue,
+ c.perfDataObject[0].DsPercentReadsFromKCC,
"read",
"knowledge_consistency_checker",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentReadsFromLSA].FirstValue,
+ c.perfDataObject[0].DsPercentReadsFromLSA,
"read",
"local_security_authority",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentReadsFromNSPI].FirstValue,
+ c.perfDataObject[0].DsPercentReadsFromNSPI,
"read",
"name_service_provider_interface",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentReadsFromNTDSAPI].FirstValue,
+ c.perfDataObject[0].DsPercentReadsFromNTDSAPI,
"read",
"directory_service_api",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentReadsFromSAM].FirstValue,
+ c.perfDataObject[0].DsPercentReadsFromSAM,
"read",
"security_account_manager",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentReadsOther].FirstValue,
+ c.perfDataObject[0].DsPercentReadsOther,
"read",
"other",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesFromDRA].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesFromDRA,
"search",
"replication_agent",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesFromKCC].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesFromKCC,
"search",
"knowledge_consistency_checker",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesFromLDAP].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesFromLDAP,
"search",
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesFromLSA].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesFromLSA,
"search",
"local_security_authority",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesFromNSPI].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesFromNSPI,
"search",
"name_service_provider_interface",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesFromNTDSAPI].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesFromNTDSAPI,
"search",
"directory_service_api",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesFromSAM].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesFromSAM,
"search",
"security_account_manager",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesOther].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesOther,
"search",
"other",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentWritesFromDRA].FirstValue,
+ c.perfDataObject[0].DsPercentWritesFromDRA,
"write",
"replication_agent",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentWritesFromKCC].FirstValue,
+ c.perfDataObject[0].DsPercentWritesFromKCC,
"write",
"knowledge_consistency_checker",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentWritesFromLDAP].FirstValue,
+ c.perfDataObject[0].DsPercentWritesFromLDAP,
"write",
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentSearchesFromLSA].FirstValue,
+ c.perfDataObject[0].DsPercentSearchesFromLSA,
"write",
"local_security_authority",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentWritesFromNSPI].FirstValue,
+ c.perfDataObject[0].DsPercentWritesFromNSPI,
"write",
"name_service_provider_interface",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentWritesFromNTDSAPI].FirstValue,
+ c.perfDataObject[0].DsPercentWritesFromNTDSAPI,
"write",
"directory_service_api",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentWritesFromSAM].FirstValue,
+ c.perfDataObject[0].DsPercentWritesFromSAM,
"write",
"security_account_manager",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
- data[dsPercentWritesOther].FirstValue,
+ c.perfDataObject[0].DsPercentWritesOther,
"write",
"other",
)
@@ -1162,207 +1010,207 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.directorySearchSubOperationsTotal,
prometheus.CounterValue,
- data[dsSearchSubOperationsPerSec].FirstValue,
+ c.perfDataObject[0].DsSearchSubOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.securityDescriptorPropagationEventsTotal,
prometheus.CounterValue,
- data[dsSecurityDescriptorSubOperationsPerSec].FirstValue,
+ c.perfDataObject[0].DsSecurityDescriptorSubOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.securityDescriptorPropagationEventsQueued,
prometheus.GaugeValue,
- data[dsSecurityDescriptorPropagationsEvents].FirstValue,
+ c.perfDataObject[0].DsSecurityDescriptorPropagationsEvents,
)
ch <- prometheus.MustNewConstMetric(
c.securityDescriptorPropagationAccessWaitTotalSeconds,
prometheus.GaugeValue,
- data[dsSecurityDescriptorPropagatorAverageExclusionTime].FirstValue,
+ c.perfDataObject[0].DsSecurityDescriptorPropagatorAverageExclusionTime,
)
ch <- prometheus.MustNewConstMetric(
c.securityDescriptorPropagationItemsQueuedTotal,
prometheus.CounterValue,
- data[dsSecurityDescriptorPropagatorRuntimeQueue].FirstValue,
+ c.perfDataObject[0].DsSecurityDescriptorPropagatorRuntimeQueue,
)
ch <- prometheus.MustNewConstMetric(
c.directoryServiceThreads,
prometheus.GaugeValue,
- data[dsThreadsInUse].FirstValue,
+ c.perfDataObject[0].DsThreadsInUse,
)
ch <- prometheus.MustNewConstMetric(
c.ldapClosedConnectionsTotal,
prometheus.CounterValue,
- data[ldapClosedConnectionsPerSec].FirstValue,
+ c.perfDataObject[0].LdapClosedConnectionsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.ldapOpenedConnectionsTotal,
prometheus.CounterValue,
- data[ldapNewConnectionsPerSec].FirstValue,
+ c.perfDataObject[0].LdapNewConnectionsPerSec,
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.ldapOpenedConnectionsTotal,
prometheus.CounterValue,
- data[ldapNewSSLConnectionsPerSec].FirstValue,
+ c.perfDataObject[0].LdapNewSSLConnectionsPerSec,
"ldaps",
)
ch <- prometheus.MustNewConstMetric(
c.ldapActiveThreads,
prometheus.GaugeValue,
- data[ldapActiveThreads].FirstValue,
+ c.perfDataObject[0].LdapActiveThreads,
)
ch <- prometheus.MustNewConstMetric(
c.ldapLastBindTimeSeconds,
prometheus.GaugeValue,
- data[ldapBindTime].FirstValue/1000,
+ c.perfDataObject[0].LdapBindTime/1000,
)
ch <- prometheus.MustNewConstMetric(
c.ldapSearchesTotal,
prometheus.CounterValue,
- data[ldapSearchesPerSec].FirstValue,
+ c.perfDataObject[0].LdapSearchesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.ldapUdpOperationsTotal,
prometheus.CounterValue,
- data[ldapUDPOperationsPerSec].FirstValue,
+ c.perfDataObject[0].LdapUDPOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.ldapWritesTotal,
prometheus.CounterValue,
- data[ldapWritesPerSec].FirstValue,
+ c.perfDataObject[0].LdapWritesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.ldapClientSessions,
prometheus.GaugeValue,
- data[ldapClientSessions].FirstValue,
+ c.perfDataObject[0].LdapClientSessions,
)
ch <- prometheus.MustNewConstMetric(
c.linkValuesCleanedTotal,
prometheus.CounterValue,
- data[linkValuesCleanedPerSec].FirstValue,
+ c.perfDataObject[0].LinkValuesCleanedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.phantomObjectsCleanedTotal,
prometheus.CounterValue,
- data[phantomsCleanedPerSec].FirstValue,
+ c.perfDataObject[0].PhantomsCleanedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.phantomObjectsVisitedTotal,
prometheus.CounterValue,
- data[phantomsVisitedPerSec].FirstValue,
+ c.perfDataObject[0].PhantomsVisitedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipEvaluationsTotal,
prometheus.CounterValue,
- data[samGlobalGroupMembershipEvaluationsPerSec].FirstValue,
+ c.perfDataObject[0].SamGlobalGroupMembershipEvaluationsPerSec,
"global",
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipEvaluationsTotal,
prometheus.CounterValue,
- data[samDomainLocalGroupMembershipEvaluationsPerSec].FirstValue,
+ c.perfDataObject[0].SamDomainLocalGroupMembershipEvaluationsPerSec,
"domain_local",
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipEvaluationsTotal,
prometheus.CounterValue,
- data[samUniversalGroupMembershipEvaluationsPerSec].FirstValue,
+ c.perfDataObject[0].SamUniversalGroupMembershipEvaluationsPerSec,
"universal",
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipGlobalCatalogEvaluationsTotal,
prometheus.CounterValue,
- data[samGCEvaluationsPerSec].FirstValue,
+ c.perfDataObject[0].SamGCEvaluationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipEvaluationsNonTransitiveTotal,
prometheus.CounterValue,
- data[samNonTransitiveMembershipEvaluationsPerSec].FirstValue,
+ c.perfDataObject[0].SamNonTransitiveMembershipEvaluationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipEvaluationsTransitiveTotal,
prometheus.CounterValue,
- data[samTransitiveMembershipEvaluationsPerSec].FirstValue,
+ c.perfDataObject[0].SamTransitiveMembershipEvaluationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samGroupEvaluationLatency,
prometheus.GaugeValue,
- data[samAccountGroupEvaluationLatency].FirstValue,
+ c.perfDataObject[0].SamAccountGroupEvaluationLatency,
"account_group",
)
ch <- prometheus.MustNewConstMetric(
c.samGroupEvaluationLatency,
prometheus.GaugeValue,
- data[samResourceGroupEvaluationLatency].FirstValue,
+ c.perfDataObject[0].SamResourceGroupEvaluationLatency,
"resource_group",
)
ch <- prometheus.MustNewConstMetric(
c.samComputerCreationRequestsTotal,
prometheus.CounterValue,
- data[samSuccessfulComputerCreationsPerSecIncludesAllRequests].FirstValue,
+ c.perfDataObject[0].SamSuccessfulComputerCreationsPerSecIncludesAllRequests,
)
ch <- prometheus.MustNewConstMetric(
c.samComputerCreationSuccessfulRequestsTotal,
prometheus.CounterValue,
- data[samMachineCreationAttemptsPerSec].FirstValue,
+ c.perfDataObject[0].SamMachineCreationAttemptsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samUserCreationRequestsTotal,
prometheus.CounterValue,
- data[samUserCreationAttemptsPerSec].FirstValue,
+ c.perfDataObject[0].SamUserCreationAttemptsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samUserCreationSuccessfulRequestsTotal,
prometheus.CounterValue,
- data[samSuccessfulUserCreationsPerSec].FirstValue,
+ c.perfDataObject[0].SamSuccessfulUserCreationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samQueryDisplayRequestsTotal,
prometheus.CounterValue,
- data[samDisplayInformationQueriesPerSec].FirstValue,
+ c.perfDataObject[0].SamDisplayInformationQueriesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samEnumerationsTotal,
prometheus.CounterValue,
- data[samEnumerationsPerSec].FirstValue,
+ c.perfDataObject[0].SamEnumerationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samMembershipChangesTotal,
prometheus.CounterValue,
- data[samMembershipChangesPerSec].FirstValue,
+ c.perfDataObject[0].SamMembershipChangesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samPasswordChangesTotal,
prometheus.CounterValue,
- data[samPasswordChangesPerSec].FirstValue,
+ c.perfDataObject[0].SamPasswordChangesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.tombstonesObjectsCollectedTotal,
prometheus.CounterValue,
- data[tombstonesGarbageCollectedPerSec].FirstValue,
+ c.perfDataObject[0].TombstonesGarbageCollectedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.tombstonesObjectsVisitedTotal,
prometheus.CounterValue,
- data[tombstonesVisitedPerSec].FirstValue,
+ c.perfDataObject[0].TombstonesVisitedPerSec,
)
return nil
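The rewritten Collect above drops the map lookup on the "NTDS" instance (the old perfData["NTDS"] access) and instead reads the first element of the typed slice filled by c.perfDataCollector.Collect(&c.perfDataObject). One detail worth noting is the USN counters, which are exposed as two 32-bit halves; the expression float64(uint64(high)<<32)+low reassembles them into a single 64-bit value. A tiny standalone illustration (example numbers only):

package main

import "fmt"

// combineUSN mirrors the expression used in Collect: shift the high 32 bits of
// the update sequence number into place and add the low part.
func combineUSN(high, low float64) float64 {
	return float64(uint64(high)<<32) + low
}

func main() {
	// With a high part of 1 and a low part of 42, the combined USN is 2^32 + 42.
	fmt.Println(combineUSN(1, 42)) // 4.294967338e+09
}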
diff --git a/internal/collector/ad/const.go b/internal/collector/ad/const.go
deleted file mode 100644
index 967171986..000000000
--- a/internal/collector/ad/const.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package ad
-
-const (
- abANRPerSec = "AB ANR/sec"
- abBrowsesPerSec = "AB Browses/sec"
- abClientSessions = "AB Client Sessions"
- abMatchesPerSec = "AB Matches/sec"
- abPropertyReadsPerSec = "AB Property Reads/sec"
- abProxyLookupsPerSec = "AB Proxy Lookups/sec"
- abSearchesPerSec = "AB Searches/sec"
- approximateHighestDNT = "Approximate highest DNT"
- atqEstimatedQueueDelay = "ATQ Estimated Queue Delay"
- atqOutstandingQueuedRequests = "ATQ Outstanding Queued Requests"
- _ = "ATQ Queue Latency"
- atqRequestLatency = "ATQ Request Latency"
- atqThreadsLDAP = "ATQ Threads LDAP"
- atqThreadsOther = "ATQ Threads Other"
- atqThreadsTotal = "ATQ Threads Total"
- baseSearchesPerSec = "Base searches/sec"
- databaseAddsPerSec = "Database adds/sec"
- databaseDeletesPerSec = "Database deletes/sec"
- databaseModifiesPerSec = "Database modifys/sec"
- databaseRecyclesPerSec = "Database recycles/sec"
- digestBindsPerSec = "Digest Binds/sec"
- _ = "DirSync session throttling rate"
- _ = "DirSync sessions in progress"
- draHighestUSNCommittedHighPart = "DRA Highest USN Committed (High part)"
- draHighestUSNCommittedLowPart = "DRA Highest USN Committed (Low part)"
- draHighestUSNIssuedHighPart = "DRA Highest USN Issued (High part)"
- draHighestUSNIssuedLowPart = "DRA Highest USN Issued (Low part)"
- draInboundBytesCompressedBetweenSitesAfterCompressionSinceBoot = "DRA Inbound Bytes Compressed (Between Sites, After Compression) Since Boot"
- draInboundBytesCompressedBetweenSitesAfterCompressionPerSec = "DRA Inbound Bytes Compressed (Between Sites, After Compression)/sec"
- draInboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot = "DRA Inbound Bytes Compressed (Between Sites, Before Compression) Since Boot"
- draInboundBytesCompressedBetweenSitesBeforeCompressionPerSec = "DRA Inbound Bytes Compressed (Between Sites, Before Compression)/sec"
- draInboundBytesNotCompressedWithinSiteSinceBoot = "DRA Inbound Bytes Not Compressed (Within Site) Since Boot"
- draInboundBytesNotCompressedWithinSitePerSec = "DRA Inbound Bytes Not Compressed (Within Site)/sec"
- draInboundBytesTotalSinceBoot = "DRA Inbound Bytes Total Since Boot"
- draInboundBytesTotalPerSec = "DRA Inbound Bytes Total/sec"
- draInboundFullSyncObjectsRemaining = "DRA Inbound Full Sync Objects Remaining"
- draInboundLinkValueUpdatesRemainingInPacket = "DRA Inbound Link Value Updates Remaining in Packet"
- _ = "DRA Inbound Link Values/sec"
- draInboundObjectUpdatesRemainingInPacket = "DRA Inbound Object Updates Remaining in Packet"
- draInboundObjectsAppliedPerSec = "DRA Inbound Objects Applied/sec"
- draInboundObjectsFilteredPerSec = "DRA Inbound Objects Filtered/sec"
- draInboundObjectsPerSec = "DRA Inbound Objects/sec"
- draInboundPropertiesAppliedPerSec = "DRA Inbound Properties Applied/sec"
- draInboundPropertiesFilteredPerSec = "DRA Inbound Properties Filtered/sec"
- draInboundPropertiesTotalPerSec = "DRA Inbound Properties Total/sec"
- _ = "DRA Inbound Sync Link Deletion/sec"
- draInboundTotalUpdatesRemainingInPacket = "DRA Inbound Total Updates Remaining in Packet"
- draInboundValuesDNsOnlyPerSec = "DRA Inbound Values (DNs only)/sec"
- draInboundValuesTotalPerSec = "DRA Inbound Values Total/sec"
- _ = "DRA number of NC replication calls since boot"
- _ = "DRA number of successful NC replication calls since boot"
- draOutboundBytesCompressedBetweenSitesAfterCompressionSinceBoot = "DRA Outbound Bytes Compressed (Between Sites, After Compression) Since Boot"
- draOutboundBytesCompressedBetweenSitesAfterCompressionPerSec = "DRA Outbound Bytes Compressed (Between Sites, After Compression)/sec"
- draOutboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot = "DRA Outbound Bytes Compressed (Between Sites, Before Compression) Since Boot"
- draOutboundBytesCompressedBetweenSitesBeforeCompressionPerSec = "DRA Outbound Bytes Compressed (Between Sites, Before Compression)/sec"
- draOutboundBytesNotCompressedWithinSiteSinceBoot = "DRA Outbound Bytes Not Compressed (Within Site) Since Boot"
- draOutboundBytesNotCompressedWithinSitePerSec = "DRA Outbound Bytes Not Compressed (Within Site)/sec"
- draOutboundBytesTotalSinceBoot = "DRA Outbound Bytes Total Since Boot"
- draOutboundBytesTotalPerSec = "DRA Outbound Bytes Total/sec"
- draOutboundObjectsFilteredPerSec = "DRA Outbound Objects Filtered/sec"
- draOutboundObjectsPerSec = "DRA Outbound Objects/sec"
- draOutboundPropertiesPerSec = "DRA Outbound Properties/sec"
- draOutboundValuesDNsOnlyPerSec = "DRA Outbound Values (DNs only)/sec"
- draOutboundValuesTotalPerSec = "DRA Outbound Values Total/sec"
- draPendingReplicationOperations = "DRA Pending Replication Operations"
- draPendingReplicationSynchronizations = "DRA Pending Replication Synchronizations"
- draSyncFailuresOnSchemaMismatch = "DRA Sync Failures on Schema Mismatch"
- draSyncRequestsMade = "DRA Sync Requests Made"
- draSyncRequestsSuccessful = "DRA Sync Requests Successful"
- draThreadsGettingNCChanges = "DRA Threads Getting NC Changes"
- draThreadsGettingNCChangesHoldingSemaphore = "DRA Threads Getting NC Changes Holding Semaphore"
- _ = "DRA total number of Busy failures since boot"
- _ = "DRA total number of MissingParent failures since boot"
- _ = "DRA total number of NotEnoughAttrs/MissingObject failures since boot"
- _ = "DRA total number of Preempted failures since boot"
- _ = "DRA total time of applying replication package since boot"
- _ = "DRA total time of NC replication calls since boot"
- _ = "DRA total time of successful NC replication calls since boot"
- _ = "DRA total time of successfully applying replication package since boot"
- _ = "DRA total time on waiting async replication packages since boot"
- _ = "DRA total time on waiting sync replication packages since boot"
- dsPercentReadsFromDRA = "DS % Reads from DRA"
- dsPercentReadsFromKCC = "DS % Reads from KCC"
- dsPercentReadsFromLSA = "DS % Reads from LSA"
- dsPercentReadsFromNSPI = "DS % Reads from NSPI"
- dsPercentReadsFromNTDSAPI = "DS % Reads from NTDSAPI"
- dsPercentReadsFromSAM = "DS % Reads from SAM"
- dsPercentReadsOther = "DS % Reads Other"
- dsPercentSearchesFromDRA = "DS % Searches from DRA"
- dsPercentSearchesFromKCC = "DS % Searches from KCC"
- dsPercentSearchesFromLDAP = "DS % Searches from LDAP"
- dsPercentSearchesFromLSA = "DS % Searches from LSA"
- dsPercentSearchesFromNSPI = "DS % Searches from NSPI"
- dsPercentSearchesFromNTDSAPI = "DS % Searches from NTDSAPI"
- dsPercentSearchesFromSAM = "DS % Searches from SAM"
- dsPercentSearchesOther = "DS % Searches Other"
- dsPercentWritesFromDRA = "DS % Writes from DRA"
- dsPercentWritesFromKCC = "DS % Writes from KCC"
- dsPercentWritesFromLDAP = "DS % Writes from LDAP"
- dsPercentWritesFromLSA = "DS % Writes from LSA"
- dsPercentWritesFromNSPI = "DS % Writes from NSPI"
- dsPercentWritesFromNTDSAPI = "DS % Writes from NTDSAPI"
- dsPercentWritesFromSAM = "DS % Writes from SAM"
- dsPercentWritesOther = "DS % Writes Other"
- dsClientBindsPerSec = "DS Client Binds/sec"
- dsClientNameTranslationsPerSec = "DS Client Name Translations/sec"
- dsDirectoryReadsPerSec = "DS Directory Reads/sec"
- dsDirectorySearchesPerSec = "DS Directory Searches/sec"
- dsDirectoryWritesPerSec = "DS Directory Writes/sec"
- dsMonitorListSize = "DS Monitor List Size"
- dsNameCacheHitRate = "DS Name Cache hit rate"
- dsNotifyQueueSize = "DS Notify Queue Size"
- dsSearchSubOperationsPerSec = "DS Search sub-operations/sec"
- dsSecurityDescriptorPropagationsEvents = "DS Security Descriptor Propagations Events"
- dsSecurityDescriptorPropagatorAverageExclusionTime = "DS Security Descriptor Propagator Average Exclusion Time"
- dsSecurityDescriptorPropagatorRuntimeQueue = "DS Security Descriptor Propagator Runtime Queue"
- dsSecurityDescriptorSubOperationsPerSec = "DS Security Descriptor sub-operations/sec"
- dsServerBindsPerSec = "DS Server Binds/sec"
- dsServerNameTranslationsPerSec = "DS Server Name Translations/sec"
- dsThreadsInUse = "DS Threads in Use"
- _ = "Error eventlogs since boot"
- _ = "Error events since boot"
- externalBindsPerSec = "External Binds/sec"
- fastBindsPerSec = "Fast Binds/sec"
- _ = "Fatal events since boot"
- _ = "Info eventlogs since boot"
- ldapActiveThreads = "LDAP Active Threads"
- _ = "LDAP Add Operations"
- _ = "LDAP Add Operations/sec"
- _ = "LDAP batch slots available"
- ldapBindTime = "LDAP Bind Time"
- _ = "LDAP busy retries"
- _ = "LDAP busy retries/sec"
- ldapClientSessions = "LDAP Client Sessions"
- ldapClosedConnectionsPerSec = "LDAP Closed Connections/sec"
- _ = "LDAP Delete Operations"
- _ = "LDAP Delete Operations/sec"
- _ = "LDAP Modify DN Operations"
- _ = "LDAP Modify DN Operations/sec"
- _ = "LDAP Modify Operations"
- _ = "LDAP Modify Operations/sec"
- ldapNewConnectionsPerSec = "LDAP New Connections/sec"
- ldapNewSSLConnectionsPerSec = "LDAP New SSL Connections/sec"
- _ = "LDAP Outbound Bytes"
- _ = "LDAP Outbound Bytes/sec"
- _ = "LDAP Page Search Cache entries count"
- _ = "LDAP Page Search Cache size"
- ldapSearchesPerSec = "LDAP Searches/sec"
- ldapSuccessfulBindsPerSec = "LDAP Successful Binds/sec"
- _ = "LDAP Threads Sleeping on BUSY"
- ldapUDPOperationsPerSec = "LDAP UDP operations/sec"
- ldapWritesPerSec = "LDAP Writes/sec"
- linkValuesCleanedPerSec = "Link Values Cleaned/sec"
- _ = "Links added"
- _ = "Links added/sec"
- _ = "Links visited"
- _ = "Links visited/sec"
- _ = "Logical link deletes"
- _ = "Logical link deletes/sec"
- negotiatedBindsPerSec = "Negotiated Binds/sec"
- ntlmBindsPerSec = "NTLM Binds/sec"
- _ = "Objects returned"
- _ = "Objects returned/sec"
- _ = "Objects visited"
- _ = "Objects visited/sec"
- oneLevelSearchesPerSec = "Onelevel searches/sec"
- _ = "PDC failed password update notifications"
- _ = "PDC password update notifications/sec"
- _ = "PDC successful password update notifications"
- phantomsCleanedPerSec = "Phantoms Cleaned/sec"
- phantomsVisitedPerSec = "Phantoms Visited/sec"
- _ = "Physical link deletes"
- _ = "Physical link deletes/sec"
- _ = "Replicate Single Object operations"
- _ = "Replicate Single Object operations/sec"
- _ = "RID Pool invalidations since boot"
- _ = "RID Pool request failures since boot"
- _ = "RID Pool request successes since boot"
- samAccountGroupEvaluationLatency = "SAM Account Group Evaluation Latency"
- samDisplayInformationQueriesPerSec = "SAM Display Information Queries/sec"
- samDomainLocalGroupMembershipEvaluationsPerSec = "SAM Domain Local Group Membership Evaluations/sec"
- samEnumerationsPerSec = "SAM Enumerations/sec"
- samGCEvaluationsPerSec = "SAM GC Evaluations/sec"
- samGlobalGroupMembershipEvaluationsPerSec = "SAM Global Group Membership Evaluations/sec"
- samMachineCreationAttemptsPerSec = "SAM Machine Creation Attempts/sec"
- samMembershipChangesPerSec = "SAM Membership Changes/sec"
- samNonTransitiveMembershipEvaluationsPerSec = "SAM Non-Transitive Membership Evaluations/sec"
- samPasswordChangesPerSec = "SAM Password Changes/sec"
- samResourceGroupEvaluationLatency = "SAM Resource Group Evaluation Latency"
- samSuccessfulComputerCreationsPerSecIncludesAllRequests = "SAM Successful Computer Creations/sec: Includes all requests"
- samSuccessfulUserCreationsPerSec = "SAM Successful User Creations/sec"
- samTransitiveMembershipEvaluationsPerSec = "SAM Transitive Membership Evaluations/sec"
- samUniversalGroupMembershipEvaluationsPerSec = "SAM Universal Group Membership Evaluations/sec"
- samUserCreationAttemptsPerSec = "SAM User Creation Attempts/sec"
- simpleBindsPerSec = "Simple Binds/sec"
- subtreeSearchesPerSec = "Subtree searches/sec"
- tombstonesGarbageCollectedPerSec = "Tombstones Garbage Collected/sec"
- tombstonesVisitedPerSec = "Tombstones Visited/sec"
- transitiveOperationsMillisecondsRun = "Transitive operations milliseconds run"
- transitiveOperationsPerSec = "Transitive operations/sec"
- transitiveSubOperationsPerSec = "Transitive suboperations/sec"
- _ = "Warning eventlogs since boot"
- _ = "Warning events since boot"
-)
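The counter-name constants deleted above are replaced by struct tags on the new perfDataCounterValues type below: each exported field carries a perfdata:"<counter name>" tag, blank (_) fields keep intentionally ignored counters documented in place, and a ",secondvalue" suffix (as on "DS Name Cache hit rate") selects a counter's second raw value. The actual mapping is done by the internal pdh package; purely as an illustration of the tag convention (hypothetical fill helper, not the exporter's implementation), a tag-driven mapper could look like this:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// sample mirrors the shape of perfDataCounterValues: each field is bound to a
// Windows performance counter by its perfdata struct tag.
type sample struct {
	LdapClientSessions            float64 `perfdata:"LDAP Client Sessions"`
	DsNameCacheHitRate            float64 `perfdata:"DS Name Cache hit rate"`
	DsNameCacheHitRateSecondValue float64 `perfdata:"DS Name Cache hit rate,secondvalue"`
	_                             float64 `perfdata:"LDAP Active Threads"` // counter intentionally not exposed as a metric
}

// fill copies counter readings into tagged fields; blank fields are skipped.
// Illustration only: the real collector lives in internal/pdh.
func fill(dst any, first, second map[string]float64) {
	v := reflect.ValueOf(dst).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		tag := f.Tag.Get("perfdata")
		if f.Name == "_" || tag == "" {
			continue
		}
		// ",secondvalue" selects the counter's second raw value.
		if name, isSecond := strings.CutSuffix(tag, ",secondvalue"); isSecond {
			v.Field(i).SetFloat(second[name])
		} else {
			v.Field(i).SetFloat(first[name])
		}
	}
}

func main() {
	first := map[string]float64{"LDAP Client Sessions": 12, "DS Name Cache hit rate": 98}
	second := map[string]float64{"DS Name Cache hit rate": 100}

	var s sample
	fill(&s, first, second)
	fmt.Printf("%+v\n", s)
}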
diff --git a/internal/collector/ad/types.go b/internal/collector/ad/types.go
new file mode 100644
index 000000000..e2f38b240
--- /dev/null
+++ b/internal/collector/ad/types.go
@@ -0,0 +1,223 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package ad
+
+type perfDataCounterValues struct {
+ AbANRPerSec float64 `perfdata:"AB ANR/sec"`
+ AbBrowsesPerSec float64 `perfdata:"AB Browses/sec"`
+ AbClientSessions float64 `perfdata:"AB Client Sessions"`
+ AbMatchesPerSec float64 `perfdata:"AB Matches/sec"`
+ AbPropertyReadsPerSec float64 `perfdata:"AB Property Reads/sec"`
+ AbProxyLookupsPerSec float64 `perfdata:"AB Proxy Lookups/sec"`
+ AbSearchesPerSec float64 `perfdata:"AB Searches/sec"`
+ ApproximateHighestDNT float64 `perfdata:"Approximate highest DNT"`
+ AtqEstimatedQueueDelay float64 `perfdata:"ATQ Estimated Queue Delay"`
+ AtqOutstandingQueuedRequests float64 `perfdata:"ATQ Outstanding Queued Requests"`
+ _ float64 `perfdata:"ATQ Queue Latency"`
+ AtqRequestLatency float64 `perfdata:"ATQ Request Latency"`
+ AtqThreadsLDAP float64 `perfdata:"ATQ Threads LDAP"`
+ AtqThreadsOther float64 `perfdata:"ATQ Threads Other"`
+ AtqThreadsTotal float64 `perfdata:"ATQ Threads Total"`
+ BaseSearchesPerSec float64 `perfdata:"Base searches/sec"`
+ DatabaseAddsPerSec float64 `perfdata:"Database adds/sec"`
+ DatabaseDeletesPerSec float64 `perfdata:"Database deletes/sec"`
+ DatabaseModifiesPerSec float64 `perfdata:"Database modifys/sec"`
+ DatabaseRecyclesPerSec float64 `perfdata:"Database recycles/sec"`
+ DigestBindsPerSec float64 `perfdata:"Digest Binds/sec"`
+ _ float64 `perfdata:"DirSync session throttling rate"`
+ _ float64 `perfdata:"DirSync sessions in progress"`
+ DRAHighestUSNCommittedHighPart float64 `perfdata:"DRA Highest USN Committed (High part)"`
+ DRAHighestUSNCommittedLowPart float64 `perfdata:"DRA Highest USN Committed (Low part)"`
+ DRAHighestUSNIssuedHighPart float64 `perfdata:"DRA Highest USN Issued (High part)"`
+ DRAHighestUSNIssuedLowPart float64 `perfdata:"DRA Highest USN Issued (Low part)"`
+ DRAInboundBytesCompressedBetweenSitesAfterCompressionSinceBoot float64 `perfdata:"DRA Inbound Bytes Compressed (Between Sites, After Compression) Since Boot"`
+ DRAInboundBytesCompressedBetweenSitesAfterCompressionPerSec float64 `perfdata:"DRA Inbound Bytes Compressed (Between Sites, After Compression)/sec"`
+ DRAInboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot float64 `perfdata:"DRA Inbound Bytes Compressed (Between Sites, Before Compression) Since Boot"`
+ DRAInboundBytesCompressedBetweenSitesBeforeCompressionPerSec float64 `perfdata:"DRA Inbound Bytes Compressed (Between Sites, Before Compression)/sec"`
+ DRAInboundBytesNotCompressedWithinSiteSinceBoot float64 `perfdata:"DRA Inbound Bytes Not Compressed (Within Site) Since Boot"`
+ DRAInboundBytesNotCompressedWithinSitePerSec float64 `perfdata:"DRA Inbound Bytes Not Compressed (Within Site)/sec"`
+ DRAInboundBytesTotalSinceBoot float64 `perfdata:"DRA Inbound Bytes Total Since Boot"`
+ DRAInboundBytesTotalPerSec float64 `perfdata:"DRA Inbound Bytes Total/sec"`
+ DRAInboundFullSyncObjectsRemaining float64 `perfdata:"DRA Inbound Full Sync Objects Remaining"`
+ DRAInboundLinkValueUpdatesRemainingInPacket float64 `perfdata:"DRA Inbound Link Value Updates Remaining in Packet"`
+ _ float64 `perfdata:"DRA Inbound Link Values/sec"`
+ DRAInboundObjectUpdatesRemainingInPacket float64 `perfdata:"DRA Inbound Object Updates Remaining in Packet"`
+ DRAInboundObjectsAppliedPerSec float64 `perfdata:"DRA Inbound Objects Applied/sec"`
+ DRAInboundObjectsFilteredPerSec float64 `perfdata:"DRA Inbound Objects Filtered/sec"`
+ DRAInboundObjectsPerSec float64 `perfdata:"DRA Inbound Objects/sec"`
+ DRAInboundPropertiesAppliedPerSec float64 `perfdata:"DRA Inbound Properties Applied/sec"`
+ DRAInboundPropertiesFilteredPerSec float64 `perfdata:"DRA Inbound Properties Filtered/sec"`
+ DRAInboundPropertiesTotalPerSec float64 `perfdata:"DRA Inbound Properties Total/sec"`
+ _ float64 `perfdata:"DRA Inbound Sync Link Deletion/sec"`
+ DRAInboundTotalUpdatesRemainingInPacket float64 `perfdata:"DRA Inbound Total Updates Remaining in Packet"`
+ DRAInboundValuesDNsOnlyPerSec float64 `perfdata:"DRA Inbound Values (DNs only)/sec"`
+ DRAInboundValuesTotalPerSec float64 `perfdata:"DRA Inbound Values Total/sec"`
+ _ float64 `perfdata:"DRA number of NC replication calls since boot"`
+ _ float64 `perfdata:"DRA number of successful NC replication calls since boot"`
+ DRAOutboundBytesCompressedBetweenSitesAfterCompressionSinceBoot float64 `perfdata:"DRA Outbound Bytes Compressed (Between Sites, After Compression) Since Boot"`
+ DRAOutboundBytesCompressedBetweenSitesAfterCompressionPerSec float64 `perfdata:"DRA Outbound Bytes Compressed (Between Sites, After Compression)/sec"`
+ DRAOutboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot float64 `perfdata:"DRA Outbound Bytes Compressed (Between Sites, Before Compression) Since Boot"`
+ DRAOutboundBytesCompressedBetweenSitesBeforeCompressionPerSec float64 `perfdata:"DRA Outbound Bytes Compressed (Between Sites, Before Compression)/sec"`
+ DRAOutboundBytesNotCompressedWithinSiteSinceBoot float64 `perfdata:"DRA Outbound Bytes Not Compressed (Within Site) Since Boot"`
+ DRAOutboundBytesNotCompressedWithinSitePerSec float64 `perfdata:"DRA Outbound Bytes Not Compressed (Within Site)/sec"`
+ DRAOutboundBytesTotalSinceBoot float64 `perfdata:"DRA Outbound Bytes Total Since Boot"`
+ DRAOutboundBytesTotalPerSec float64 `perfdata:"DRA Outbound Bytes Total/sec"`
+ DRAOutboundObjectsFilteredPerSec float64 `perfdata:"DRA Outbound Objects Filtered/sec"`
+ DRAOutboundObjectsPerSec float64 `perfdata:"DRA Outbound Objects/sec"`
+ DRAOutboundPropertiesPerSec float64 `perfdata:"DRA Outbound Properties/sec"`
+ DRAOutboundValuesDNsOnlyPerSec float64 `perfdata:"DRA Outbound Values (DNs only)/sec"`
+ DRAOutboundValuesTotalPerSec float64 `perfdata:"DRA Outbound Values Total/sec"`
+ DRAPendingReplicationOperations float64 `perfdata:"DRA Pending Replication Operations"`
+ DRAPendingReplicationSynchronizations float64 `perfdata:"DRA Pending Replication Synchronizations"`
+ DRASyncFailuresOnSchemaMismatch float64 `perfdata:"DRA Sync Failures on Schema Mismatch"`
+ DRASyncRequestsMade float64 `perfdata:"DRA Sync Requests Made"`
+ DRASyncRequestsSuccessful float64 `perfdata:"DRA Sync Requests Successful"`
+ DRAThreadsGettingNCChanges float64 `perfdata:"DRA Threads Getting NC Changes"`
+ DRAThreadsGettingNCChangesHoldingSemaphore float64 `perfdata:"DRA Threads Getting NC Changes Holding Semaphore"`
+ _ float64 `perfdata:"DRA total number of Busy failures since boot"`
+ _ float64 `perfdata:"DRA total number of MissingParent failures since boot"`
+ _ float64 `perfdata:"DRA total number of NotEnoughAttrs/MissingObject failures since boot"`
+ _ float64 `perfdata:"DRA total number of Preempted failures since boot"`
+ _ float64 `perfdata:"DRA total time of applying replication package since boot"`
+ _ float64 `perfdata:"DRA total time of NC replication calls since boot"`
+ _ float64 `perfdata:"DRA total time of successful NC replication calls since boot"`
+ _ float64 `perfdata:"DRA total time of successfully applying replication package since boot"`
+ _ float64 `perfdata:"DRA total time on waiting async replication packages since boot"`
+ _ float64 `perfdata:"DRA total time on waiting sync replication packages since boot"`
+ DsPercentReadsFromDRA float64 `perfdata:"DS % Reads from DRA"`
+ DsPercentReadsFromKCC float64 `perfdata:"DS % Reads from KCC"`
+ DsPercentReadsFromLSA float64 `perfdata:"DS % Reads from LSA"`
+ DsPercentReadsFromNSPI float64 `perfdata:"DS % Reads from NSPI"`
+ DsPercentReadsFromNTDSAPI float64 `perfdata:"DS % Reads from NTDSAPI"`
+ DsPercentReadsFromSAM float64 `perfdata:"DS % Reads from SAM"`
+ DsPercentReadsOther float64 `perfdata:"DS % Reads Other"`
+ DsPercentSearchesFromDRA float64 `perfdata:"DS % Searches from DRA"`
+ DsPercentSearchesFromKCC float64 `perfdata:"DS % Searches from KCC"`
+ DsPercentSearchesFromLDAP float64 `perfdata:"DS % Searches from LDAP"`
+ DsPercentSearchesFromLSA float64 `perfdata:"DS % Searches from LSA"`
+ DsPercentSearchesFromNSPI float64 `perfdata:"DS % Searches from NSPI"`
+ DsPercentSearchesFromNTDSAPI float64 `perfdata:"DS % Searches from NTDSAPI"`
+ DsPercentSearchesFromSAM float64 `perfdata:"DS % Searches from SAM"`
+ DsPercentSearchesOther float64 `perfdata:"DS % Searches Other"`
+ DsPercentWritesFromDRA float64 `perfdata:"DS % Writes from DRA"`
+ DsPercentWritesFromKCC float64 `perfdata:"DS % Writes from KCC"`
+ DsPercentWritesFromLDAP float64 `perfdata:"DS % Writes from LDAP"`
+ DsPercentWritesFromLSA float64 `perfdata:"DS % Writes from LSA"`
+ DsPercentWritesFromNSPI float64 `perfdata:"DS % Writes from NSPI"`
+ DsPercentWritesFromNTDSAPI float64 `perfdata:"DS % Writes from NTDSAPI"`
+ DsPercentWritesFromSAM float64 `perfdata:"DS % Writes from SAM"`
+ DsPercentWritesOther float64 `perfdata:"DS % Writes Other"`
+ DsClientBindsPerSec float64 `perfdata:"DS Client Binds/sec"`
+ DsClientNameTranslationsPerSec float64 `perfdata:"DS Client Name Translations/sec"`
+ DsDirectoryReadsPerSec float64 `perfdata:"DS Directory Reads/sec"`
+ DsDirectorySearchesPerSec float64 `perfdata:"DS Directory Searches/sec"`
+ DsDirectoryWritesPerSec float64 `perfdata:"DS Directory Writes/sec"`
+ DsMonitorListSize float64 `perfdata:"DS Monitor List Size"`
+ DsNameCacheHitRate float64 `perfdata:"DS Name Cache hit rate"`
+ DsNameCacheHitRateSecondValue float64 `perfdata:"DS Name Cache hit rate,secondvalue"`
+ DsNotifyQueueSize float64 `perfdata:"DS Notify Queue Size"`
+ DsSearchSubOperationsPerSec float64 `perfdata:"DS Search sub-operations/sec"`
+ DsSecurityDescriptorPropagationsEvents float64 `perfdata:"DS Security Descriptor Propagations Events"`
+ DsSecurityDescriptorPropagatorAverageExclusionTime float64 `perfdata:"DS Security Descriptor Propagator Average Exclusion Time"`
+ DsSecurityDescriptorPropagatorRuntimeQueue float64 `perfdata:"DS Security Descriptor Propagator Runtime Queue"`
+ DsSecurityDescriptorSubOperationsPerSec float64 `perfdata:"DS Security Descriptor sub-operations/sec"`
+ DsServerBindsPerSec float64 `perfdata:"DS Server Binds/sec"`
+ DsServerNameTranslationsPerSec float64 `perfdata:"DS Server Name Translations/sec"`
+ DsThreadsInUse float64 `perfdata:"DS Threads in Use"`
+ _ float64 `perfdata:"Error eventlogs since boot"`
+ _ float64 `perfdata:"Error events since boot"`
+ ExternalBindsPerSec float64 `perfdata:"External Binds/sec"`
+ FastBindsPerSec float64 `perfdata:"Fast Binds/sec"`
+ _ float64 `perfdata:"Fatal events since boot"`
+ _ float64 `perfdata:"Info eventlogs since boot"`
+ LdapActiveThreads float64 `perfdata:"LDAP Active Threads"`
+ _ float64 `perfdata:"LDAP Add Operations"`
+ _ float64 `perfdata:"LDAP Add Operations/sec"`
+ _ float64 `perfdata:"LDAP batch slots available"`
+ LdapBindTime float64 `perfdata:"LDAP Bind Time"`
+ _ float64 `perfdata:"LDAP busy retries"`
+ _ float64 `perfdata:"LDAP busy retries/sec"`
+ LdapClientSessions float64 `perfdata:"LDAP Client Sessions"`
+ LdapClosedConnectionsPerSec float64 `perfdata:"LDAP Closed Connections/sec"`
+ _ float64 `perfdata:"LDAP Delete Operations"`
+ _ float64 `perfdata:"LDAP Delete Operations/sec"`
+ _ float64 `perfdata:"LDAP Modify DN Operations"`
+ _ float64 `perfdata:"LDAP Modify DN Operations/sec"`
+ _ float64 `perfdata:"LDAP Modify Operations"`
+ _ float64 `perfdata:"LDAP Modify Operations/sec"`
+ LdapNewConnectionsPerSec float64 `perfdata:"LDAP New Connections/sec"`
+ LdapNewSSLConnectionsPerSec float64 `perfdata:"LDAP New SSL Connections/sec"`
+ _ float64 `perfdata:"LDAP Outbound Bytes"`
+ _ float64 `perfdata:"LDAP Outbound Bytes/sec"`
+ _ float64 `perfdata:"LDAP Page Search Cache entries count"`
+ _ float64 `perfdata:"LDAP Page Search Cache size"`
+ LdapSearchesPerSec float64 `perfdata:"LDAP Searches/sec"`
+ LdapSuccessfulBindsPerSec float64 `perfdata:"LDAP Successful Binds/sec"`
+ _ float64 `perfdata:"LDAP Threads Sleeping on BUSY"`
+ LdapUDPOperationsPerSec float64 `perfdata:"LDAP UDP operations/sec"`
+ LdapWritesPerSec float64 `perfdata:"LDAP Writes/sec"`
+ LinkValuesCleanedPerSec float64 `perfdata:"Link Values Cleaned/sec"`
+ _ float64 `perfdata:"Links added"`
+ _ float64 `perfdata:"Links added/sec"`
+ _ float64 `perfdata:"Links visited"`
+ _ float64 `perfdata:"Links visited/sec"`
+ _ float64 `perfdata:"Logical link deletes"`
+ _ float64 `perfdata:"Logical link deletes/sec"`
+ NegotiatedBindsPerSec float64 `perfdata:"Negotiated Binds/sec"`
+ NTLMBindsPerSec float64 `perfdata:"NTLM Binds/sec"`
+ _ float64 `perfdata:"Objects returned"`
+ _ float64 `perfdata:"Objects returned/sec"`
+ _ float64 `perfdata:"Objects visited"`
+ _ float64 `perfdata:"Objects visited/sec"`
+ OneLevelSearchesPerSec float64 `perfdata:"Onelevel searches/sec"`
+ _ float64 `perfdata:"PDC failed password update notifications"`
+ _ float64 `perfdata:"PDC password update notifications/sec"`
+ _ float64 `perfdata:"PDC successful password update notifications"`
+ PhantomsCleanedPerSec float64 `perfdata:"Phantoms Cleaned/sec"`
+ PhantomsVisitedPerSec float64 `perfdata:"Phantoms Visited/sec"`
+ _ float64 `perfdata:"Physical link deletes"`
+ _ float64 `perfdata:"Physical link deletes/sec"`
+ _ float64 `perfdata:"Replicate Single Object operations"`
+ _ float64 `perfdata:"Replicate Single Object operations/sec"`
+ _ float64 `perfdata:"RID Pool invalidations since boot"`
+ _ float64 `perfdata:"RID Pool request failures since boot"`
+ _ float64 `perfdata:"RID Pool request successes since boot"`
+ SamAccountGroupEvaluationLatency float64 `perfdata:"SAM Account Group Evaluation Latency"`
+ SamDisplayInformationQueriesPerSec float64 `perfdata:"SAM Display Information Queries/sec"`
+ SamDomainLocalGroupMembershipEvaluationsPerSec float64 `perfdata:"SAM Domain Local Group Membership Evaluations/sec"`
+ SamEnumerationsPerSec float64 `perfdata:"SAM Enumerations/sec"`
+ SamGCEvaluationsPerSec float64 `perfdata:"SAM GC Evaluations/sec"`
+ SamGlobalGroupMembershipEvaluationsPerSec float64 `perfdata:"SAM Global Group Membership Evaluations/sec"`
+ SamMachineCreationAttemptsPerSec float64 `perfdata:"SAM Machine Creation Attempts/sec"`
+ SamMembershipChangesPerSec float64 `perfdata:"SAM Membership Changes/sec"`
+ SamNonTransitiveMembershipEvaluationsPerSec float64 `perfdata:"SAM Non-Transitive Membership Evaluations/sec"`
+ SamPasswordChangesPerSec float64 `perfdata:"SAM Password Changes/sec"`
+ SamResourceGroupEvaluationLatency float64 `perfdata:"SAM Resource Group Evaluation Latency"`
+ SamSuccessfulComputerCreationsPerSecIncludesAllRequests float64 `perfdata:"SAM Successful Computer Creations/sec: Includes all requests"`
+ SamSuccessfulUserCreationsPerSec float64 `perfdata:"SAM Successful User Creations/sec"`
+ SamTransitiveMembershipEvaluationsPerSec float64 `perfdata:"SAM Transitive Membership Evaluations/sec"`
+ SamUniversalGroupMembershipEvaluationsPerSec float64 `perfdata:"SAM Universal Group Membership Evaluations/sec"`
+ SamUserCreationAttemptsPerSec float64 `perfdata:"SAM User Creation Attempts/sec"`
+ SimpleBindsPerSec float64 `perfdata:"Simple Binds/sec"`
+ SubtreeSearchesPerSec float64 `perfdata:"Subtree searches/sec"`
+ TombstonesGarbageCollectedPerSec float64 `perfdata:"Tombstones Garbage Collected/sec"`
+ TombstonesVisitedPerSec float64 `perfdata:"Tombstones Visited/sec"`
+ TransitiveOperationsMillisecondsRun float64 `perfdata:"Transitive operations milliseconds run"`
+ TransitiveOperationsPerSec float64 `perfdata:"Transitive operations/sec"`
+ TransitiveSubOperationsPerSec float64 `perfdata:"Transitive suboperations/sec"`
+ _ float64 `perfdata:"Warning eventlogs since boot"`
+ _ float64 `perfdata:"Warning events since boot"`
+}
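
The blank `_ float64` entries above document counters that exist on the DirectoryServices object without exporting them; a blank field can still carry a struct tag, but its value can never be read back in ordinary Go. A minimal, self-contained illustration of that property (the two tags are copied from the struct above; the enumeration logic is only a stand-in, not the internal/pdh implementation):

package main

import (
	"fmt"
	"reflect"
)

// Miniature of the pattern above: one exported counter and one counter that
// is documented behind a blank identifier and therefore never surfaced.
type values struct {
	DRASyncRequestsMade float64 `perfdata:"DRA Sync Requests Made"`
	_                   float64 `perfdata:"DRA total number of Busy failures since boot"`
}

func main() {
	t := reflect.TypeOf(values{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// The blank field keeps its tag, so a tag-driven query can still see
		// the counter name even though no metric is ever built from it.
		fmt.Printf("field=%q tag=%q\n", f.Name, f.Tag.Get("perfdata"))
	}
}
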
diff --git a/internal/collector/adcs/adcs.go b/internal/collector/adcs/adcs.go
index dd3ce99e5..0f7a50f72 100644
--- a/internal/collector/adcs/adcs.go
+++ b/internal/collector/adcs/adcs.go
@@ -21,7 +21,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@@ -37,7 +37,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
challengeResponseProcessingTime *prometheus.Desc
challengeResponsesPerSecond *prometheus.Desc
@@ -83,21 +84,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("Certification Authority", perfdata.InstancesAll, []string{
- requestsPerSecond,
- requestProcessingTime,
- retrievalsPerSecond,
- retrievalProcessingTime,
- failedRequestsPerSecond,
- issuedRequestsPerSecond,
- pendingRequestsPerSecond,
- requestCryptographicSigningTime,
- requestPolicyModuleProcessingTime,
- challengeResponsesPerSecond,
- challengeResponseProcessingTime,
- signedCertificateTimestampListsPerSecond,
- signedCertificateTimestampListProcessingTime,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("Certification Authority", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err)
}
@@ -185,93 +172,89 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Certification Authority (ADCS) metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect Certification Authority (ADCS) metrics: %w", types.ErrNoData)
- }
-
- for name, data := range perfData {
+ for _, data := range c.perfDataObject {
ch <- prometheus.MustNewConstMetric(
c.requestsPerSecond,
prometheus.CounterValue,
- data[requestsPerSecond].FirstValue,
- name,
+ data.RequestsPerSecond,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestProcessingTime,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[requestProcessingTime].FirstValue),
- name,
+ utils.MilliSecToSec(data.RequestProcessingTime),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.retrievalsPerSecond,
prometheus.CounterValue,
- data[retrievalsPerSecond].FirstValue,
- name,
+ data.RetrievalsPerSecond,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.retrievalProcessingTime,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[retrievalProcessingTime].FirstValue),
- name,
+ utils.MilliSecToSec(data.RetrievalProcessingTime),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.failedRequestsPerSecond,
prometheus.CounterValue,
- data[failedRequestsPerSecond].FirstValue,
- name,
+ data.FailedRequestsPerSecond,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.issuedRequestsPerSecond,
prometheus.CounterValue,
- data[issuedRequestsPerSecond].FirstValue,
- name,
+ data.IssuedRequestsPerSecond,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.pendingRequestsPerSecond,
prometheus.CounterValue,
- data[pendingRequestsPerSecond].FirstValue,
- name,
+ data.PendingRequestsPerSecond,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestCryptographicSigningTime,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[requestCryptographicSigningTime].FirstValue),
- name,
+ utils.MilliSecToSec(data.RequestCryptographicSigningTime),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestPolicyModuleProcessingTime,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[requestPolicyModuleProcessingTime].FirstValue),
- name,
+ utils.MilliSecToSec(data.RequestPolicyModuleProcessingTime),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.challengeResponsesPerSecond,
prometheus.CounterValue,
- data[challengeResponsesPerSecond].FirstValue,
- name,
+ data.ChallengeResponsesPerSecond,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.challengeResponseProcessingTime,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[challengeResponseProcessingTime].FirstValue),
- name,
+ utils.MilliSecToSec(data.ChallengeResponseProcessingTime),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.signedCertificateTimestampListsPerSecond,
prometheus.CounterValue,
- data[signedCertificateTimestampListsPerSecond].FirstValue,
- name,
+ data.SignedCertificateTimestampListsPerSecond,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.signedCertificateTimestampListProcessingTime,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[signedCertificateTimestampListProcessingTime].FirstValue),
- name,
+ utils.MilliSecToSec(data.SignedCertificateTimestampListProcessingTime),
+ data.Name,
)
}
diff --git a/internal/collector/adcs/const.go b/internal/collector/adcs/const.go
deleted file mode 100644
index 0e7e2e712..000000000
--- a/internal/collector/adcs/const.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package adcs
-
-const (
- challengeResponseProcessingTime = "Challenge Response processing time (ms)"
- challengeResponsesPerSecond = "Challenge Responses/sec"
- failedRequestsPerSecond = "Failed Requests/sec"
- issuedRequestsPerSecond = "Issued Requests/sec"
- pendingRequestsPerSecond = "Pending Requests/sec"
- requestCryptographicSigningTime = "Request cryptographic signing time (ms)"
- requestPolicyModuleProcessingTime = "Request policy module processing time (ms)"
- requestProcessingTime = "Request processing time (ms)"
- requestsPerSecond = "Requests/sec"
- retrievalProcessingTime = "Retrieval processing time (ms)"
- retrievalsPerSecond = "Retrievals/sec"
- signedCertificateTimestampListProcessingTime = "Signed Certificate Timestamp List processing time (ms)"
- signedCertificateTimestampListsPerSecond = "Signed Certificate Timestamp Lists/sec"
-)
diff --git a/internal/collector/adcs/types.go b/internal/collector/adcs/types.go
new file mode 100644
index 000000000..9255089cf
--- /dev/null
+++ b/internal/collector/adcs/types.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package adcs
+
+type perfDataCounterValues struct {
+ Name string
+
+ ChallengeResponseProcessingTime float64 `perfdata:"Challenge Response processing time (ms)"`
+ ChallengeResponsesPerSecond float64 `perfdata:"Challenge Responses/sec"`
+ FailedRequestsPerSecond float64 `perfdata:"Failed Requests/sec"`
+ IssuedRequestsPerSecond float64 `perfdata:"Issued Requests/sec"`
+ PendingRequestsPerSecond float64 `perfdata:"Pending Requests/sec"`
+ RequestCryptographicSigningTime float64 `perfdata:"Request cryptographic signing time (ms)"`
+ RequestPolicyModuleProcessingTime float64 `perfdata:"Request policy module processing time (ms)"`
+ RequestProcessingTime float64 `perfdata:"Request processing time (ms)"`
+ RequestsPerSecond float64 `perfdata:"Requests/sec"`
+ RetrievalProcessingTime float64 `perfdata:"Retrieval processing time (ms)"`
+ RetrievalsPerSecond float64 `perfdata:"Retrievals/sec"`
+ SignedCertificateTimestampListProcessingTime float64 `perfdata:"Signed Certificate Timestamp List processing time (ms)"`
+ SignedCertificateTimestampListsPerSecond float64 `perfdata:"Signed Certificate Timestamp Lists/sec"`
+}
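
Taken together with the Build and Collect changes above, this struct is presumably both the counter list (via the perfdata tags) and the destination for collected values, one slice element per instance with Name carrying the instance label. A toy stand-in, assuming tag-to-field mapping by reflection (the fakeCollect helper is hypothetical, not the real internal/pdh code):

package main

import (
	"fmt"
	"reflect"
)

type perfDataCounterValues struct {
	Name string

	RequestsPerSecond     float64 `perfdata:"Requests/sec"`
	RequestProcessingTime float64 `perfdata:"Request processing time (ms)"`
}

// fakeCollect fills one slice element per instance, setting Name to the
// instance label and every field whose perfdata tag matches a sampled counter.
func fakeCollect[T any](dst *[]T, samples map[string]map[string]float64) {
	*dst = (*dst)[:0]

	for instance, counters := range samples {
		var row T

		v := reflect.ValueOf(&row).Elem()
		t := v.Type()

		for i := 0; i < t.NumField(); i++ {
			field := t.Field(i)
			if field.Name == "Name" {
				v.Field(i).SetString(instance)
				continue
			}

			if tag, ok := field.Tag.Lookup("perfdata"); ok {
				v.Field(i).SetFloat(counters[tag])
			}
		}

		*dst = append(*dst, row)
	}
}

func main() {
	var rows []perfDataCounterValues

	fakeCollect(&rows, map[string]map[string]float64{
		"my-ca": {"Requests/sec": 12, "Request processing time (ms)": 3.5},
	})

	fmt.Printf("%+v\n", rows)
}
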
diff --git a/internal/collector/adfs/adfs.go b/internal/collector/adfs/adfs.go
index af57558e7..7d9c89516 100644
--- a/internal/collector/adfs/adfs.go
+++ b/internal/collector/adfs/adfs.go
@@ -18,13 +18,11 @@ package adfs
import (
"fmt"
"log/slog"
- "maps"
"math"
- "slices"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -39,7 +37,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
adLoginConnectionFailures *prometheus.Desc
artifactDBFailures *prometheus.Desc
@@ -115,51 +114,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("AD FS", nil, []string{
- adLoginConnectionFailures,
- certificateAuthentications,
- deviceAuthentications,
- extranetAccountLockouts,
- federatedAuthentications,
- passportAuthentications,
- passiveRequests,
- passwordChangeFailed,
- passwordChangeSucceeded,
- tokenRequests,
- windowsIntegratedAuthentications,
- oAuthAuthZRequests,
- oAuthClientAuthentications,
- oAuthClientAuthenticationFailures,
- oAuthClientCredentialRequestFailures,
- oAuthClientCredentialRequests,
- oAuthClientPrivateKeyJWTAuthenticationFailures,
- oAuthClientPrivateKeyJWTAuthentications,
- oAuthClientBasicAuthenticationFailures,
- oAuthClientBasicAuthentications,
- oAuthClientSecretPostAuthenticationFailures,
- oAuthClientSecretPostAuthentications,
- oAuthClientWindowsAuthenticationFailures,
- oAuthClientWindowsAuthentications,
- oAuthLogonCertRequestFailures,
- oAuthLogonCertTokenRequests,
- oAuthPasswordGrantRequestFailures,
- oAuthPasswordGrantRequests,
- oAuthTokenRequests,
- samlPTokenRequests,
- ssoAuthenticationFailures,
- ssoAuthentications,
- wsFedTokenRequests,
- wsTrustTokenRequests,
- usernamePasswordAuthenticationFailures,
- usernamePasswordAuthentications,
- externalAuthentications,
- externalAuthNFailures,
- artifactDBFailures,
- avgArtifactDBQueryTime,
- configDBFailures,
- avgConfigDBQueryTime,
- federationMetadataRequests,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("AD FS", nil)
if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err)
}
@@ -427,279 +382,267 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect ADFS metrics: %w", err)
}
- instanceKey := slices.Collect(maps.Keys(data))
-
- if len(instanceKey) == 0 {
- return fmt.Errorf("failed to collect ADFS metrics: %w", types.ErrNoData)
- }
-
- adfsData, ok := data[instanceKey[0]]
-
- if !ok {
- return fmt.Errorf("failed to collect ADFS metrics: %w", types.ErrNoData)
- }
-
ch <- prometheus.MustNewConstMetric(
c.adLoginConnectionFailures,
prometheus.CounterValue,
- adfsData[adLoginConnectionFailures].FirstValue,
+ c.perfDataObject[0].AdLoginConnectionFailures,
)
ch <- prometheus.MustNewConstMetric(
c.certificateAuthentications,
prometheus.CounterValue,
- adfsData[certificateAuthentications].FirstValue,
+ c.perfDataObject[0].CertificateAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.deviceAuthentications,
prometheus.CounterValue,
- adfsData[deviceAuthentications].FirstValue,
+ c.perfDataObject[0].DeviceAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.extranetAccountLockouts,
prometheus.CounterValue,
- adfsData[extranetAccountLockouts].FirstValue,
+ c.perfDataObject[0].ExtranetAccountLockouts,
)
ch <- prometheus.MustNewConstMetric(
c.federatedAuthentications,
prometheus.CounterValue,
- adfsData[federatedAuthentications].FirstValue,
+ c.perfDataObject[0].FederatedAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.passportAuthentications,
prometheus.CounterValue,
- adfsData[passportAuthentications].FirstValue,
+ c.perfDataObject[0].PassportAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.passiveRequests,
prometheus.CounterValue,
- adfsData[passiveRequests].FirstValue,
+ c.perfDataObject[0].PassiveRequests,
)
ch <- prometheus.MustNewConstMetric(
c.passwordChangeFailed,
prometheus.CounterValue,
- adfsData[passwordChangeFailed].FirstValue,
+ c.perfDataObject[0].PasswordChangeFailed,
)
ch <- prometheus.MustNewConstMetric(
c.passwordChangeSucceeded,
prometheus.CounterValue,
- adfsData[passwordChangeSucceeded].FirstValue,
+ c.perfDataObject[0].PasswordChangeSucceeded,
)
ch <- prometheus.MustNewConstMetric(
c.tokenRequests,
prometheus.CounterValue,
- adfsData[tokenRequests].FirstValue,
+ c.perfDataObject[0].TokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.windowsIntegratedAuthentications,
prometheus.CounterValue,
- adfsData[windowsIntegratedAuthentications].FirstValue,
+ c.perfDataObject[0].WindowsIntegratedAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthAuthZRequests,
prometheus.CounterValue,
- adfsData[oAuthAuthZRequests].FirstValue,
+ c.perfDataObject[0].OAuthAuthZRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientAuthentications,
prometheus.CounterValue,
- adfsData[oAuthClientAuthentications].FirstValue,
+ c.perfDataObject[0].OAuthClientAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientAuthenticationsFailures,
prometheus.CounterValue,
- adfsData[oAuthClientAuthenticationFailures].FirstValue,
+ c.perfDataObject[0].OAuthClientAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientCredentialsRequestFailures,
prometheus.CounterValue,
- adfsData[oAuthClientCredentialRequestFailures].FirstValue,
+ c.perfDataObject[0].OAuthClientCredentialRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientCredentialsRequests,
prometheus.CounterValue,
- adfsData[oAuthClientCredentialRequests].FirstValue,
+ c.perfDataObject[0].OAuthClientCredentialRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientPrivateKeyJwtAuthenticationFailures,
prometheus.CounterValue,
- adfsData[oAuthClientPrivateKeyJWTAuthenticationFailures].FirstValue,
+ c.perfDataObject[0].OAuthClientPrivateKeyJWTAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientPrivateKeyJwtAuthentications,
prometheus.CounterValue,
- adfsData[oAuthClientPrivateKeyJWTAuthentications].FirstValue,
+ c.perfDataObject[0].OAuthClientPrivateKeyJWTAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretBasicAuthenticationFailures,
prometheus.CounterValue,
- adfsData[oAuthClientBasicAuthenticationFailures].FirstValue,
+ c.perfDataObject[0].OAuthClientBasicAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretBasicAuthentications,
prometheus.CounterValue,
- adfsData[oAuthClientBasicAuthentications].FirstValue,
+ c.perfDataObject[0].OAuthClientBasicAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretPostAuthenticationFailures,
prometheus.CounterValue,
- adfsData[oAuthClientSecretPostAuthenticationFailures].FirstValue,
+ c.perfDataObject[0].OAuthClientSecretPostAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretPostAuthentications,
prometheus.CounterValue,
- adfsData[oAuthClientSecretPostAuthentications].FirstValue,
+ c.perfDataObject[0].OAuthClientSecretPostAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientWindowsIntegratedAuthenticationFailures,
prometheus.CounterValue,
- adfsData[oAuthClientWindowsAuthenticationFailures].FirstValue,
+ c.perfDataObject[0].OAuthClientWindowsAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientWindowsIntegratedAuthentications,
prometheus.CounterValue,
- adfsData[oAuthClientWindowsAuthentications].FirstValue,
+ c.perfDataObject[0].OAuthClientWindowsAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthLogonCertificateRequestFailures,
prometheus.CounterValue,
- adfsData[oAuthLogonCertRequestFailures].FirstValue,
+ c.perfDataObject[0].OAuthLogonCertRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthLogonCertificateTokenRequests,
prometheus.CounterValue,
- adfsData[oAuthLogonCertTokenRequests].FirstValue,
+ c.perfDataObject[0].OAuthLogonCertTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthPasswordGrantRequestFailures,
prometheus.CounterValue,
- adfsData[oAuthPasswordGrantRequestFailures].FirstValue,
+ c.perfDataObject[0].OAuthPasswordGrantRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthPasswordGrantRequests,
prometheus.CounterValue,
- adfsData[oAuthPasswordGrantRequests].FirstValue,
+ c.perfDataObject[0].OAuthPasswordGrantRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthTokenRequests,
prometheus.CounterValue,
- adfsData[oAuthTokenRequests].FirstValue,
+ c.perfDataObject[0].OAuthTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.samlPTokenRequests,
prometheus.CounterValue,
- adfsData[samlPTokenRequests].FirstValue,
+ c.perfDataObject[0].SamlPTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.ssoAuthenticationFailures,
prometheus.CounterValue,
- adfsData[ssoAuthenticationFailures].FirstValue,
+ c.perfDataObject[0].SsoAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.ssoAuthentications,
prometheus.CounterValue,
- adfsData[ssoAuthentications].FirstValue,
+ c.perfDataObject[0].SsoAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.wsFedTokenRequests,
prometheus.CounterValue,
- adfsData[wsFedTokenRequests].FirstValue,
+ c.perfDataObject[0].WsFedTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.wsTrustTokenRequests,
prometheus.CounterValue,
- adfsData[wsTrustTokenRequests].FirstValue,
+ c.perfDataObject[0].WsTrustTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.upAuthenticationFailures,
prometheus.CounterValue,
- adfsData[usernamePasswordAuthenticationFailures].FirstValue,
+ c.perfDataObject[0].UsernamePasswordAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.upAuthentications,
prometheus.CounterValue,
- adfsData[usernamePasswordAuthentications].FirstValue,
+ c.perfDataObject[0].UsernamePasswordAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.externalAuthenticationFailures,
prometheus.CounterValue,
- adfsData[externalAuthNFailures].FirstValue,
+ c.perfDataObject[0].ExternalAuthNFailures,
)
ch <- prometheus.MustNewConstMetric(
c.externalAuthentications,
prometheus.CounterValue,
- adfsData[externalAuthentications].FirstValue,
+ c.perfDataObject[0].ExternalAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.artifactDBFailures,
prometheus.CounterValue,
- adfsData[artifactDBFailures].FirstValue,
+ c.perfDataObject[0].ArtifactDBFailures,
)
ch <- prometheus.MustNewConstMetric(
c.avgArtifactDBQueryTime,
prometheus.CounterValue,
- adfsData[avgArtifactDBQueryTime].FirstValue*math.Pow(10, -8),
+ c.perfDataObject[0].AvgArtifactDBQueryTime*math.Pow(10, -8),
)
ch <- prometheus.MustNewConstMetric(
c.configDBFailures,
prometheus.CounterValue,
- adfsData[configDBFailures].FirstValue,
+ c.perfDataObject[0].ConfigDBFailures,
)
ch <- prometheus.MustNewConstMetric(
c.avgConfigDBQueryTime,
prometheus.CounterValue,
- adfsData[avgConfigDBQueryTime].FirstValue*math.Pow(10, -8),
+ c.perfDataObject[0].AvgConfigDBQueryTime*math.Pow(10, -8),
)
ch <- prometheus.MustNewConstMetric(
c.federationMetadataRequests,
prometheus.CounterValue,
- adfsData[federationMetadataRequests].FirstValue,
+ c.perfDataObject[0].FederationMetadataRequests,
)
return nil
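
The removed instanceKey/ErrNoData guard has no direct equivalent in the new code, which reads index 0 of c.perfDataObject throughout. A hedged sketch of how the same guard could be phrased against the slice-based API, assuming package context (fmt and types are already imported here) and assuming Collect may leave the slice empty when the AD FS counter set returns no data; this helper is hypothetical and not part of the change:

// Hypothetical guard, mirroring the ErrNoData behaviour of the removed code.
func (c *Collector) firstRow() (perfDataCounterValues, error) {
	if len(c.perfDataObject) == 0 {
		return perfDataCounterValues{}, fmt.Errorf("failed to collect ADFS metrics: %w", types.ErrNoData)
	}

	return c.perfDataObject[0], nil
}
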
diff --git a/internal/collector/adfs/const.go b/internal/collector/adfs/const.go
deleted file mode 100644
index a301c9187..000000000
--- a/internal/collector/adfs/const.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package adfs
-
-const (
- adLoginConnectionFailures = "AD Login Connection Failures"
- artifactDBFailures = "Artifact Database Connection Failures"
- avgArtifactDBQueryTime = "Average Artifact Database Query Time"
- avgConfigDBQueryTime = "Average Config Database Query Time"
- certificateAuthentications = "Certificate Authentications"
- configDBFailures = "Configuration Database Connection Failures"
- deviceAuthentications = "Device Authentications"
- externalAuthentications = "External Authentications"
- externalAuthNFailures = "External Authentication Failures"
- extranetAccountLockouts = "Extranet Account Lockouts"
- federatedAuthentications = "Federated Authentications"
- federationMetadataRequests = "Federation Metadata Requests"
- oAuthAuthZRequests = "OAuth AuthZ Requests"
- oAuthClientAuthenticationFailures = "OAuth Client Authentications Failures"
- oAuthClientAuthentications = "OAuth Client Authentications"
- oAuthClientBasicAuthenticationFailures = "OAuth Client Secret Basic Authentication Failures"
- oAuthClientBasicAuthentications = "OAuth Client Secret Basic Authentications"
- oAuthClientCredentialRequestFailures = "OAuth Client Credentials Request Failures"
- oAuthClientCredentialRequests = "OAuth Client Credentials Requests"
- oAuthClientPrivateKeyJWTAuthenticationFailures = "OAuth Client Private Key Jwt Authentication Failures"
- oAuthClientPrivateKeyJWTAuthentications = "OAuth Client Private Key Jwt Authentications"
- oAuthClientSecretPostAuthenticationFailures = "OAuth Client Secret Post Authentication Failures"
- oAuthClientSecretPostAuthentications = "OAuth Client Secret Post Authentications"
- oAuthClientWindowsAuthenticationFailures = "OAuth Client Windows Integrated Authentication Failures"
- oAuthClientWindowsAuthentications = "OAuth Client Windows Integrated Authentications"
- oAuthLogonCertRequestFailures = "OAuth Logon Certificate Request Failures"
- oAuthLogonCertTokenRequests = "OAuth Logon Certificate Token Requests"
- oAuthPasswordGrantRequestFailures = "OAuth Password Grant Request Failures"
- oAuthPasswordGrantRequests = "OAuth Password Grant Requests"
- oAuthTokenRequests = "OAuth Token Requests"
- passiveRequests = "Passive Requests"
- passportAuthentications = "Microsoft Passport Authentications"
- passwordChangeFailed = "Password Change Failed Requests"
- passwordChangeSucceeded = "Password Change Successful Requests"
- samlPTokenRequests = "SAML-P Token Requests"
- ssoAuthenticationFailures = "SSO Authentication Failures"
- ssoAuthentications = "SSO Authentications"
- tokenRequests = "Token Requests"
- usernamePasswordAuthenticationFailures = "U/P Authentication Failures"
- usernamePasswordAuthentications = "U/P Authentications"
- windowsIntegratedAuthentications = "Windows Integrated Authentications"
- wsFedTokenRequests = "WS-Fed Token Requests"
- wsTrustTokenRequests = "WS-Trust Token Requests"
-)
diff --git a/internal/collector/adfs/types.go b/internal/collector/adfs/types.go
new file mode 100644
index 000000000..475f35162
--- /dev/null
+++ b/internal/collector/adfs/types.go
@@ -0,0 +1,62 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package adfs
+
+type perfDataCounterValues struct {
+ AdLoginConnectionFailures float64 `perfdata:"AD Login Connection Failures"`
+ ArtifactDBFailures float64 `perfdata:"Artifact Database Connection Failures"`
+ AvgArtifactDBQueryTime float64 `perfdata:"Average Artifact Database Query Time"`
+ AvgConfigDBQueryTime float64 `perfdata:"Average Config Database Query Time"`
+ CertificateAuthentications float64 `perfdata:"Certificate Authentications"`
+ ConfigDBFailures float64 `perfdata:"Configuration Database Connection Failures"`
+ DeviceAuthentications float64 `perfdata:"Device Authentications"`
+ ExternalAuthentications float64 `perfdata:"External Authentications"`
+ ExternalAuthNFailures float64 `perfdata:"External Authentication Failures"`
+ ExtranetAccountLockouts float64 `perfdata:"Extranet Account Lockouts"`
+ FederatedAuthentications float64 `perfdata:"Federated Authentications"`
+ FederationMetadataRequests float64 `perfdata:"Federation Metadata Requests"`
+ OAuthAuthZRequests float64 `perfdata:"OAuth AuthZ Requests"`
+ OAuthClientAuthenticationFailures float64 `perfdata:"OAuth Client Authentications Failures"`
+ OAuthClientAuthentications float64 `perfdata:"OAuth Client Authentications"`
+ OAuthClientBasicAuthenticationFailures float64 `perfdata:"OAuth Client Secret Basic Authentication Failures"`
+ OAuthClientBasicAuthentications float64 `perfdata:"OAuth Client Secret Basic Authentications"`
+ OAuthClientCredentialRequestFailures float64 `perfdata:"OAuth Client Credentials Request Failures"`
+ OAuthClientCredentialRequests float64 `perfdata:"OAuth Client Credentials Requests"`
+ OAuthClientPrivateKeyJWTAuthenticationFailures float64 `perfdata:"OAuth Client Private Key Jwt Authentication Failures"`
+ OAuthClientPrivateKeyJWTAuthentications float64 `perfdata:"OAuth Client Private Key Jwt Authentications"`
+ OAuthClientSecretPostAuthenticationFailures float64 `perfdata:"OAuth Client Secret Post Authentication Failures"`
+ OAuthClientSecretPostAuthentications float64 `perfdata:"OAuth Client Secret Post Authentications"`
+ OAuthClientWindowsAuthenticationFailures float64 `perfdata:"OAuth Client Windows Integrated Authentication Failures"`
+ OAuthClientWindowsAuthentications float64 `perfdata:"OAuth Client Windows Integrated Authentications"`
+ OAuthLogonCertRequestFailures float64 `perfdata:"OAuth Logon Certificate Request Failures"`
+ OAuthLogonCertTokenRequests float64 `perfdata:"OAuth Logon Certificate Token Requests"`
+ OAuthPasswordGrantRequestFailures float64 `perfdata:"OAuth Password Grant Request Failures"`
+ OAuthPasswordGrantRequests float64 `perfdata:"OAuth Password Grant Requests"`
+ OAuthTokenRequests float64 `perfdata:"OAuth Token Requests"`
+ PassiveRequests float64 `perfdata:"Passive Requests"`
+ PassportAuthentications float64 `perfdata:"Microsoft Passport Authentications"`
+ PasswordChangeFailed float64 `perfdata:"Password Change Failed Requests"`
+ PasswordChangeSucceeded float64 `perfdata:"Password Change Successful Requests"`
+ SamlPTokenRequests float64 `perfdata:"SAML-P Token Requests"`
+ SsoAuthenticationFailures float64 `perfdata:"SSO Authentication Failures"`
+ SsoAuthentications float64 `perfdata:"SSO Authentications"`
+ TokenRequests float64 `perfdata:"Token Requests"`
+ UsernamePasswordAuthenticationFailures float64 `perfdata:"U/P Authentication Failures"`
+ UsernamePasswordAuthentications float64 `perfdata:"U/P Authentications"`
+ WindowsIntegratedAuthentications float64 `perfdata:"Windows Integrated Authentications"`
+ WsFedTokenRequests float64 `perfdata:"WS-Fed Token Requests"`
+ WsTrustTokenRequests float64 `perfdata:"WS-Trust Token Requests"`
+}
diff --git a/internal/collector/cache/cache.go b/internal/collector/cache/cache.go
index e894e112e..632505edd 100644
--- a/internal/collector/cache/cache.go
+++ b/internal/collector/cache/cache.go
@@ -21,7 +21,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -37,7 +37,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
asyncCopyReadsTotal *prometheus.Desc
asyncDataMapsTotal *prometheus.Desc
@@ -99,37 +100,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("Cache", perfdata.InstancesAll, []string{
- asyncCopyReadsTotal,
- asyncDataMapsTotal,
- asyncFastReadsTotal,
- asyncMDLReadsTotal,
- asyncPinReadsTotal,
- copyReadHitsTotal,
- copyReadsTotal,
- dataFlushesTotal,
- dataFlushPagesTotal,
- dataMapHitsPercent,
- dataMapPinsTotal,
- dataMapsTotal,
- dirtyPages,
- dirtyPageThreshold,
- fastReadNotPossiblesTotal,
- fastReadResourceMissesTotal,
- fastReadsTotal,
- lazyWriteFlushesTotal,
- lazyWritePagesTotal,
- mdlReadHitsTotal,
- mdlReadsTotal,
- pinReadHitsTotal,
- pinReadsTotal,
- readAheadsTotal,
- syncCopyReadsTotal,
- syncDataMapsTotal,
- syncFastReadsTotal,
- syncMDLReadsTotal,
- syncPinReadsTotal,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("Cache", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err)
}
@@ -314,189 +285,183 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect implements the Collector interface.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Cache metrics: %w", err)
}
- cacheData, ok := data[perfdata.InstanceEmpty]
-
- if !ok {
- return fmt.Errorf("failed to collect Cache metrics: %w", types.ErrNoData)
- }
-
ch <- prometheus.MustNewConstMetric(
c.asyncCopyReadsTotal,
prometheus.CounterValue,
- cacheData[asyncCopyReadsTotal].FirstValue,
+ c.perfDataObject[0].AsyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.asyncDataMapsTotal,
prometheus.CounterValue,
- cacheData[asyncDataMapsTotal].FirstValue,
+ c.perfDataObject[0].AsyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.asyncFastReadsTotal,
prometheus.CounterValue,
- cacheData[asyncFastReadsTotal].FirstValue,
+ c.perfDataObject[0].AsyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.asyncMDLReadsTotal,
prometheus.CounterValue,
- cacheData[asyncMDLReadsTotal].FirstValue,
+ c.perfDataObject[0].AsyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.asyncPinReadsTotal,
prometheus.CounterValue,
- cacheData[asyncPinReadsTotal].FirstValue,
+ c.perfDataObject[0].AsyncPinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.copyReadHitsTotal,
prometheus.GaugeValue,
- cacheData[copyReadHitsTotal].FirstValue,
+ c.perfDataObject[0].CopyReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.copyReadsTotal,
prometheus.CounterValue,
- cacheData[copyReadsTotal].FirstValue,
+ c.perfDataObject[0].CopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataFlushesTotal,
prometheus.CounterValue,
- cacheData[dataFlushesTotal].FirstValue,
+ c.perfDataObject[0].DataFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataFlushPagesTotal,
prometheus.CounterValue,
- cacheData[dataFlushPagesTotal].FirstValue,
+ c.perfDataObject[0].DataFlushPagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapHitsPercent,
prometheus.GaugeValue,
- cacheData[dataMapHitsPercent].FirstValue,
+ c.perfDataObject[0].DataMapHitsPercent,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapPinsTotal,
prometheus.CounterValue,
- cacheData[dataMapPinsTotal].FirstValue,
+ c.perfDataObject[0].DataMapPinsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapsTotal,
prometheus.CounterValue,
- cacheData[dataMapsTotal].FirstValue,
+ c.perfDataObject[0].DataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPages,
prometheus.GaugeValue,
- cacheData[dirtyPages].FirstValue,
+ c.perfDataObject[0].DirtyPages,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPageThreshold,
prometheus.GaugeValue,
- cacheData[dirtyPageThreshold].FirstValue,
+ c.perfDataObject[0].DirtyPageThreshold,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadNotPossiblesTotal,
prometheus.CounterValue,
- cacheData[fastReadNotPossiblesTotal].FirstValue,
+ c.perfDataObject[0].FastReadNotPossiblesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadResourceMissesTotal,
prometheus.CounterValue,
- cacheData[fastReadResourceMissesTotal].FirstValue,
+ c.perfDataObject[0].FastReadResourceMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadsTotal,
prometheus.CounterValue,
- cacheData[fastReadsTotal].FirstValue,
+ c.perfDataObject[0].FastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWriteFlushesTotal,
prometheus.CounterValue,
- cacheData[lazyWriteFlushesTotal].FirstValue,
+ c.perfDataObject[0].LazyWriteFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWritePagesTotal,
prometheus.CounterValue,
- cacheData[lazyWritePagesTotal].FirstValue,
+ c.perfDataObject[0].LazyWritePagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadHitsTotal,
prometheus.CounterValue,
- cacheData[mdlReadHitsTotal].FirstValue,
+ c.perfDataObject[0].MdlReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadsTotal,
prometheus.CounterValue,
- cacheData[mdlReadsTotal].FirstValue,
+ c.perfDataObject[0].MdlReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadHitsTotal,
prometheus.CounterValue,
- cacheData[pinReadHitsTotal].FirstValue,
+ c.perfDataObject[0].PinReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadsTotal,
prometheus.CounterValue,
- cacheData[pinReadsTotal].FirstValue,
+ c.perfDataObject[0].PinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.readAheadsTotal,
prometheus.CounterValue,
- cacheData[readAheadsTotal].FirstValue,
+ c.perfDataObject[0].ReadAheadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncCopyReadsTotal,
prometheus.CounterValue,
- cacheData[syncCopyReadsTotal].FirstValue,
+ c.perfDataObject[0].SyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncDataMapsTotal,
prometheus.CounterValue,
- cacheData[syncDataMapsTotal].FirstValue,
+ c.perfDataObject[0].SyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncFastReadsTotal,
prometheus.CounterValue,
- cacheData[syncFastReadsTotal].FirstValue,
+ c.perfDataObject[0].SyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncMDLReadsTotal,
prometheus.CounterValue,
- cacheData[syncMDLReadsTotal].FirstValue,
+ c.perfDataObject[0].SyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncPinReadsTotal,
prometheus.CounterValue,
- cacheData[syncPinReadsTotal].FirstValue,
+ c.perfDataObject[0].SyncPinReadsTotal,
)
return nil
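
Each of the values above is wrapped in prometheus.MustNewConstMetric and sent on the channel; the registry then turns these constant samples into scrape output. A small standalone example of that API (the metric name and help text below are made up for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Hypothetical descriptor; the real collector builds its descriptors in Build().
	desc := prometheus.NewDesc("example_cache_copy_reads_total", "Example counter.", nil, nil)

	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42)

	// Inspect the constant metric the same way the scrape path would.
	var out dto.Metric
	if err := m.Write(&out); err != nil {
		panic(err)
	}

	fmt.Println(out.GetCounter().GetValue()) // 42
}
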
diff --git a/internal/collector/cache/const.go b/internal/collector/cache/const.go
deleted file mode 100644
index 274d73587..000000000
--- a/internal/collector/cache/const.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package cache
-
-// Perflib "Cache":
-// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
-const (
- asyncCopyReadsTotal = "Async Copy Reads/sec"
- asyncDataMapsTotal = "Async Data Maps/sec"
- asyncFastReadsTotal = "Async Fast Reads/sec"
- asyncMDLReadsTotal = "Async MDL Reads/sec"
- asyncPinReadsTotal = "Async Pin Reads/sec"
- copyReadHitsTotal = "Copy Read Hits %"
- copyReadsTotal = "Copy Reads/sec"
- dataFlushesTotal = "Data Flushes/sec"
- dataFlushPagesTotal = "Data Flush Pages/sec"
- dataMapHitsPercent = "Data Map Hits %"
- dataMapPinsTotal = "Data Map Pins/sec"
- dataMapsTotal = "Data Maps/sec"
- dirtyPages = "Dirty Pages"
- dirtyPageThreshold = "Dirty Page Threshold"
- fastReadNotPossiblesTotal = "Fast Read Not Possibles/sec"
- fastReadResourceMissesTotal = "Fast Read Resource Misses/sec"
- fastReadsTotal = "Fast Reads/sec"
- lazyWriteFlushesTotal = "Lazy Write Flushes/sec"
- lazyWritePagesTotal = "Lazy Write Pages/sec"
- mdlReadHitsTotal = "MDL Read Hits %"
- mdlReadsTotal = "MDL Reads/sec"
- pinReadHitsTotal = "Pin Read Hits %"
- pinReadsTotal = "Pin Reads/sec"
- readAheadsTotal = "Read Aheads/sec"
- syncCopyReadsTotal = "Sync Copy Reads/sec"
- syncDataMapsTotal = "Sync Data Maps/sec"
- syncFastReadsTotal = "Sync Fast Reads/sec"
- syncMDLReadsTotal = "Sync MDL Reads/sec"
- syncPinReadsTotal = "Sync Pin Reads/sec"
-)
diff --git a/internal/collector/cache/types.go b/internal/collector/cache/types.go
new file mode 100644
index 000000000..41abec0d1
--- /dev/null
+++ b/internal/collector/cache/types.go
@@ -0,0 +1,50 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package cache
+
+// Perflib "Cache":
+// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
+type perfDataCounterValues struct {
+ AsyncCopyReadsTotal float64 `perfdata:"Async Copy Reads/sec"`
+ AsyncDataMapsTotal float64 `perfdata:"Async Data Maps/sec"`
+ AsyncFastReadsTotal float64 `perfdata:"Async Fast Reads/sec"`
+ AsyncMDLReadsTotal float64 `perfdata:"Async MDL Reads/sec"`
+ AsyncPinReadsTotal float64 `perfdata:"Async Pin Reads/sec"`
+ CopyReadHitsTotal float64 `perfdata:"Copy Read Hits %"`
+ CopyReadsTotal float64 `perfdata:"Copy Reads/sec"`
+ DataFlushesTotal float64 `perfdata:"Data Flushes/sec"`
+ DataFlushPagesTotal float64 `perfdata:"Data Flush Pages/sec"`
+ DataMapHitsPercent float64 `perfdata:"Data Map Hits %"`
+ DataMapPinsTotal float64 `perfdata:"Data Map Pins/sec"`
+ DataMapsTotal float64 `perfdata:"Data Maps/sec"`
+ DirtyPages float64 `perfdata:"Dirty Pages"`
+ DirtyPageThreshold float64 `perfdata:"Dirty Page Threshold"`
+ FastReadNotPossiblesTotal float64 `perfdata:"Fast Read Not Possibles/sec"`
+ FastReadResourceMissesTotal float64 `perfdata:"Fast Read Resource Misses/sec"`
+ FastReadsTotal float64 `perfdata:"Fast Reads/sec"`
+ LazyWriteFlushesTotal float64 `perfdata:"Lazy Write Flushes/sec"`
+ LazyWritePagesTotal float64 `perfdata:"Lazy Write Pages/sec"`
+ MdlReadHitsTotal float64 `perfdata:"MDL Read Hits %"`
+ MdlReadsTotal float64 `perfdata:"MDL Reads/sec"`
+ PinReadHitsTotal float64 `perfdata:"Pin Read Hits %"`
+ PinReadsTotal float64 `perfdata:"Pin Reads/sec"`
+ ReadAheadsTotal float64 `perfdata:"Read Aheads/sec"`
+ SyncCopyReadsTotal float64 `perfdata:"Sync Copy Reads/sec"`
+ SyncDataMapsTotal float64 `perfdata:"Sync Data Maps/sec"`
+ SyncFastReadsTotal float64 `perfdata:"Sync Fast Reads/sec"`
+ SyncMDLReadsTotal float64 `perfdata:"Sync MDL Reads/sec"`
+ SyncPinReadsTotal float64 `perfdata:"Sync Pin Reads/sec"`
+}
diff --git a/internal/collector/container/container.go b/internal/collector/container/container.go
index c16c4f6b5..5d28e4f14 100644
--- a/internal/collector/container/container.go
+++ b/internal/collector/container/container.go
@@ -24,7 +24,7 @@ import (
"github.com/Microsoft/hcsshim"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -322,19 +322,19 @@ func (c *Collector) collectContainer(ch chan<- prometheus.Metric, containerDetai
ch <- prometheus.MustNewConstMetric(
c.runtimeTotal,
prometheus.CounterValue,
- float64(containerStats.Processor.TotalRuntime100ns)*perfdata.TicksToSecondScaleFactor,
+ float64(containerStats.Processor.TotalRuntime100ns)*pdh.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeUser,
prometheus.CounterValue,
- float64(containerStats.Processor.RuntimeUser100ns)*perfdata.TicksToSecondScaleFactor,
+ float64(containerStats.Processor.RuntimeUser100ns)*pdh.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeKernel,
prometheus.CounterValue,
- float64(containerStats.Processor.RuntimeKernel100ns)*perfdata.TicksToSecondScaleFactor,
+ float64(containerStats.Processor.RuntimeKernel100ns)*pdh.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
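
The three runtime counters above are totals of 100-nanosecond ticks scaled by pdh.TicksToSecondScaleFactor. A quick arithmetic check, assuming the factor converts 100 ns ticks to seconds (1 tick = 1e-7 s); the constant below is a stand-in, not the exported pdh value:

package main

import "fmt"

// Stand-in for pdh.TicksToSecondScaleFactor under the assumption that one
// tick is 100 ns, i.e. 1e-7 seconds.
const ticksToSecondScaleFactor = 1.0 / 1e7

func main() {
	var totalRuntime100ns uint64 = 150_000_000 // 15 s of CPU time expressed in 100 ns ticks

	fmt.Println(float64(totalRuntime100ns) * ticksToSecondScaleFactor) // 15
}
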
diff --git a/internal/collector/cpu/const.go b/internal/collector/cpu/const.go
deleted file mode 100644
index a5444991e..000000000
--- a/internal/collector/cpu/const.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package cpu
-
-// Processor performance counters.
-const (
- c1TimeSeconds = "% C1 Time"
- c2TimeSeconds = "% C2 Time"
- c3TimeSeconds = "% C3 Time"
- c1TransitionsTotal = "C1 Transitions/sec"
- c2TransitionsTotal = "C2 Transitions/sec"
- c3TransitionsTotal = "C3 Transitions/sec"
- clockInterruptsTotal = "Clock Interrupts/sec"
- dpcQueuedPerSecond = "DPCs Queued/sec"
- dpcTimeSeconds = "% DPC Time"
- idleBreakEventsTotal = "Idle Break Events/sec"
- idleTimeSeconds = "% Idle Time"
- interruptsTotal = "Interrupts/sec"
- interruptTimeSeconds = "% Interrupt Time"
- parkingStatus = "Parking Status"
- performanceLimitPercent = "% Performance Limit"
- priorityTimeSeconds = "% Priority Time"
- privilegedTimeSeconds = "% Privileged Time"
- privilegedUtilitySeconds = "% Privileged Utility"
- processorFrequencyMHz = "Processor Frequency"
- processorPerformance = "% Processor Performance"
- processorTimeSeconds = "% Processor Time"
- processorUtilityRate = "% Processor Utility"
- userTimeSeconds = "% User Time"
-)
diff --git a/internal/collector/cpu/cpu.go b/internal/collector/cpu/cpu.go
index e81b5a016..a6e8d2219 100644
--- a/internal/collector/cpu/cpu.go
+++ b/internal/collector/cpu/cpu.go
@@ -22,7 +22,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@@ -38,7 +38,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
mu sync.Mutex
@@ -92,31 +93,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
c.mu = sync.Mutex{}
- c.perfDataCollector, err = perfdata.NewCollector("Processor Information", perfdata.InstancesAll, []string{
- c1TimeSeconds,
- c2TimeSeconds,
- c3TimeSeconds,
- c1TransitionsTotal,
- c2TransitionsTotal,
- c3TransitionsTotal,
- clockInterruptsTotal,
- dpcQueuedPerSecond,
- dpcTimeSeconds,
- idleBreakEventsTotal,
- idleTimeSeconds,
- interruptsTotal,
- interruptTimeSeconds,
- parkingStatus,
- performanceLimitPercent,
- priorityTimeSeconds,
- privilegedTimeSeconds,
- privilegedUtilitySeconds,
- processorFrequencyMHz,
- processorPerformance,
- processorTimeSeconds,
- processorUtilityRate,
- userTimeSeconds,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("Processor Information", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err)
}
@@ -216,14 +193,15 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.mu.Lock() // Lock is needed to prevent concurrent map access to c.processorRTCValues
defer c.mu.Unlock()
- data, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Processor Information metrics: %w", err)
}
var coreCount float64
- for core, coreData := range data {
+ for _, coreData := range c.perfDataObject {
+ core := coreData.Name
coreCount++
var (
@@ -233,17 +211,17 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
)
if counterProcessorRTCValues, ok = c.processorRTCValues[core]; ok {
- counterProcessorRTCValues.AddValue(uint32(coreData[processorUtilityRate].SecondValue))
+ counterProcessorRTCValues.AddValue(uint32(coreData.ProcessorUtilityRateSecondValue))
} else {
- counterProcessorRTCValues = utils.NewCounter(uint32(coreData[privilegedUtilitySeconds].SecondValue))
+ counterProcessorRTCValues = utils.NewCounter(uint32(coreData.ProcessorUtilityRateSecondValue))
}
c.processorRTCValues[core] = counterProcessorRTCValues
if counterProcessorMPerfValues, ok = c.processorMPerfValues[core]; ok {
- counterProcessorMPerfValues.AddValue(uint32(coreData[processorPerformance].SecondValue))
+ counterProcessorMPerfValues.AddValue(uint32(coreData.ProcessorPerformanceSecondValue))
} else {
- counterProcessorMPerfValues = utils.NewCounter(uint32(coreData[processorPerformance].SecondValue))
+ counterProcessorMPerfValues = utils.NewCounter(uint32(coreData.ProcessorPerformanceSecondValue))
}
c.processorMPerfValues[core] = counterProcessorMPerfValues
@@ -251,95 +229,95 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal,
prometheus.CounterValue,
- coreData[c1TimeSeconds].FirstValue,
+ coreData.C1TimeSeconds,
core, "c1",
)
ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal,
prometheus.CounterValue,
- coreData[c2TimeSeconds].FirstValue,
+ coreData.C2TimeSeconds,
core, "c2",
)
ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal,
prometheus.CounterValue,
- coreData[c3TimeSeconds].FirstValue,
+ coreData.C3TimeSeconds,
core, "c3",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
- coreData[idleTimeSeconds].FirstValue,
+ coreData.IdleTimeSeconds,
core, "idle",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
- coreData[interruptTimeSeconds].FirstValue,
+ coreData.InterruptTimeSeconds,
core, "interrupt",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
- coreData[dpcTimeSeconds].FirstValue,
+ coreData.DpcTimeSeconds,
core, "dpc",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
- coreData[privilegedTimeSeconds].FirstValue,
+ coreData.PrivilegedTimeSeconds,
core, "privileged",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
- coreData[userTimeSeconds].FirstValue,
+ coreData.UserTimeSeconds,
core, "user",
)
ch <- prometheus.MustNewConstMetric(
c.interruptsTotal,
prometheus.CounterValue,
- coreData[interruptsTotal].FirstValue,
+ coreData.InterruptsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.dpcsTotal,
prometheus.CounterValue,
- coreData[dpcQueuedPerSecond].FirstValue,
+ coreData.DpcQueuedPerSecond,
core,
)
ch <- prometheus.MustNewConstMetric(
c.clockInterruptsTotal,
prometheus.CounterValue,
- coreData[clockInterruptsTotal].FirstValue,
+ coreData.ClockInterruptsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.idleBreakEventsTotal,
prometheus.CounterValue,
- coreData[idleBreakEventsTotal].FirstValue,
+ coreData.IdleBreakEventsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.parkingStatus,
prometheus.GaugeValue,
- coreData[parkingStatus].FirstValue,
+ coreData.ParkingStatus,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorFrequencyMHz,
prometheus.GaugeValue,
- coreData[processorFrequencyMHz].FirstValue,
+ coreData.ProcessorFrequencyMHz,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorPerformance,
prometheus.CounterValue,
- coreData[processorPerformance].FirstValue,
+ coreData.ProcessorPerformance,
core,
)
ch <- prometheus.MustNewConstMetric(
@@ -357,13 +335,13 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.processorUtility,
prometheus.CounterValue,
- coreData[processorUtilityRate].FirstValue,
+ coreData.ProcessorUtilityRate,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorPrivilegedUtility,
prometheus.CounterValue,
- coreData[privilegedUtilitySeconds].FirstValue,
+ coreData.PrivilegedUtilitySeconds,
core,
)
}
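
The per-core bookkeeping above seeds a helper with the PDH second value (`ProcessorUtilityRateSecondValue`, `ProcessorPerformanceSecondValue`) truncated to `uint32`, then feeds later readings into `AddValue`. Below is a minimal sketch of a wrap-tolerant accumulator along those lines; it is an assumption about the intent of `utils.NewCounter`/`AddValue`, not the actual `internal/utils` code.

```go
// Minimal sketch of a wrap-tolerant 32-bit counter accumulator.
// Assumption: utils.NewCounter/AddValue exist to absorb uint32 wrap-around
// between scrapes; this is not the exporter's implementation.
package main

import "fmt"

// counter keeps a monotonically increasing total built from successive
// 32-bit raw readings that may wrap around.
type counter struct {
	last  uint32
	total uint64
}

func newCounter(v uint32) *counter { return &counter{last: v, total: uint64(v)} }

// addValue adds the delta since the previous reading, accounting for a
// single 32-bit wrap between observations.
func (c *counter) addValue(v uint32) {
	delta := v - c.last // unsigned subtraction handles one wrap correctly
	c.total += uint64(delta)
	c.last = v
}

func main() {
	c := newCounter(4294967290) // close to the uint32 maximum
	c.addValue(10)              // wrapped reading
	fmt.Println(c.total)        // 4294967306: the wrap is absorbed
}
```

Unsigned subtraction keeps the running total monotonic across a single 32-bit wrap between two scrapes, which is why the raw values are truncated to `uint32` before being handed to the helper.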
diff --git a/internal/collector/cpu/types.go b/internal/collector/cpu/types.go
new file mode 100644
index 000000000..f9149c44c
--- /dev/null
+++ b/internal/collector/cpu/types.go
@@ -0,0 +1,47 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package cpu
+
+// Processor performance counters.
+type perfDataCounterValues struct {
+ Name string
+
+ C1TimeSeconds float64 `perfdata:"% C1 Time"`
+ C2TimeSeconds float64 `perfdata:"% C2 Time"`
+ C3TimeSeconds float64 `perfdata:"% C3 Time"`
+ C1TransitionsTotal float64 `perfdata:"C1 Transitions/sec"`
+ C2TransitionsTotal float64 `perfdata:"C2 Transitions/sec"`
+ C3TransitionsTotal float64 `perfdata:"C3 Transitions/sec"`
+ ClockInterruptsTotal float64 `perfdata:"Clock Interrupts/sec"`
+ DpcQueuedPerSecond float64 `perfdata:"DPCs Queued/sec"`
+ DpcTimeSeconds float64 `perfdata:"% DPC Time"`
+ IdleBreakEventsTotal float64 `perfdata:"Idle Break Events/sec"`
+ IdleTimeSeconds float64 `perfdata:"% Idle Time"`
+ InterruptsTotal float64 `perfdata:"Interrupts/sec"`
+ InterruptTimeSeconds float64 `perfdata:"% Interrupt Time"`
+ ParkingStatus float64 `perfdata:"Parking Status"`
+ PerformanceLimitPercent float64 `perfdata:"% Performance Limit"`
+ PriorityTimeSeconds float64 `perfdata:"% Priority Time"`
+ PrivilegedTimeSeconds float64 `perfdata:"% Privileged Time"`
+ PrivilegedUtilitySeconds float64 `perfdata:"% Privileged Utility"`
+ ProcessorFrequencyMHz float64 `perfdata:"Processor Frequency"`
+ ProcessorPerformance float64 `perfdata:"% Processor Performance"`
+ ProcessorPerformanceSecondValue float64 `perfdata:"% Processor Performance,secondvalue"`
+ ProcessorTimeSeconds float64 `perfdata:"% Processor Time"`
+ ProcessorUtilityRate float64 `perfdata:"% Processor Utility"`
+ ProcessorUtilityRateSecondValue float64 `perfdata:"% Processor Utility,secondvalue"`
+ UserTimeSeconds float64 `perfdata:"% User Time"`
+}
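
The new `types.go` replaces the string-constant counter list with `perfdata` struct tags, including the `,secondvalue` option for counters whose second raw value is needed. The snippet below is an illustrative reflection walk over such tags, showing how a generic collector like `pdh.NewCollector[perfDataCounterValues]` could discover the requested counters; the real `pdh` internals may differ.

```go
// Illustrative only: list the counters declared by a `perfdata`-tagged struct.
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type cpuCounters struct {
	Name string

	C1TimeSeconds                   float64 `perfdata:"% C1 Time"`
	ProcessorUtilityRate            float64 `perfdata:"% Processor Utility"`
	ProcessorUtilityRateSecondValue float64 `perfdata:"% Processor Utility,secondvalue"`
}

func main() {
	t := reflect.TypeOf(cpuCounters{})
	for i := 0; i < t.NumField(); i++ {
		tag, ok := t.Field(i).Tag.Lookup("perfdata")
		if !ok {
			continue // untagged fields (e.g. Name) carry the instance label instead
		}

		counter, opts, _ := strings.Cut(tag, ",")
		fmt.Printf("field %-31s -> counter %q secondvalue=%v\n",
			t.Field(i).Name, counter, opts == "secondvalue")
	}
}
```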
diff --git a/internal/collector/dfsr/const.go b/internal/collector/dfsr/const.go
deleted file mode 100644
index 4e69127f4..000000000
--- a/internal/collector/dfsr/const.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package dfsr
-
-const (
- // Connection Perflib: "DFS Replication Service Connections".
- bytesReceivedTotal = "Total Bytes Received"
-
- // Folder Perflib: "DFS Replicated Folder".
- bandwidthSavingsUsingDFSReplicationTotal = "Bandwidth Savings Using DFS Replication"
- compressedSizeOfFilesReceivedTotal = "Compressed Size of Files Received"
- conflictBytesCleanedUpTotal = "Conflict Bytes Cleaned Up"
- conflictBytesGeneratedTotal = "Conflict Bytes Generated"
- conflictFilesCleanedUpTotal = "Conflict Files Cleaned Up"
- conflictFilesGeneratedTotal = "Conflict Files Generated"
- conflictFolderCleanupsCompletedTotal = "Conflict folder Cleanups Completed"
- conflictSpaceInUse = "Conflict Space In Use"
- deletedSpaceInUse = "Deleted Space In Use"
- deletedBytesCleanedUpTotal = "Deleted Bytes Cleaned Up"
- deletedBytesGeneratedTotal = "Deleted Bytes Generated"
- deletedFilesCleanedUpTotal = "Deleted Files Cleaned Up"
- deletedFilesGeneratedTotal = "Deleted Files Generated"
- fileInstallsRetriedTotal = "File Installs Retried"
- fileInstallsSucceededTotal = "File Installs Succeeded"
- filesReceivedTotal = "Total Files Received"
- rdcBytesReceivedTotal = "RDC Bytes Received"
- rdcCompressedSizeOfFilesReceivedTotal = "RDC Compressed Size of Files Received"
- rdcNumberOfFilesReceivedTotal = "RDC Number of Files Received"
- rdcSizeOfFilesReceivedTotal = "RDC Size of Files Received"
- sizeOfFilesReceivedTotal = "Size of Files Received"
- stagingSpaceInUse = "Staging Space In Use"
- stagingBytesCleanedUpTotal = "Staging Bytes Cleaned Up"
- stagingBytesGeneratedTotal = "Staging Bytes Generated"
- stagingFilesCleanedUpTotal = "Staging Files Cleaned Up"
- stagingFilesGeneratedTotal = "Staging Files Generated"
- updatesDroppedTotal = "Updates Dropped"
-
- // Volume Perflib: "DFS Replication Service Volumes".
- databaseCommitsTotal = "Database Commits"
- databaseLookupsTotal = "Database Lookups"
- usnJournalRecordsReadTotal = "USN Journal Records Read"
- usnJournalRecordsAcceptedTotal = "USN Journal Records Accepted"
- usnJournalUnreadPercentage = "USN Journal Unread Percentage"
-)
diff --git a/internal/collector/dfsr/dfsr.go b/internal/collector/dfsr/dfsr.go
index 272756a39..8e28e3fc1 100644
--- a/internal/collector/dfsr/dfsr.go
+++ b/internal/collector/dfsr/dfsr.go
@@ -24,7 +24,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -44,9 +44,12 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
- perfDataCollectorConnection *perfdata.Collector
- perfDataCollectorFolder *perfdata.Collector
- perfDataCollectorVolume *perfdata.Collector
+ perfDataCollectorConnection *pdh.Collector
+ perfDataCollectorFolder *pdh.Collector
+ perfDataCollectorVolume *pdh.Collector
+ perfDataObjectConnection []perfDataCounterValuesConnection
+ perfDataObjectFolder []perfDataCounterValuesFolder
+ perfDataObjectVolume []perfDataCounterValuesVolume
// connection source
connectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
@@ -160,69 +163,26 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error
if slices.Contains(c.config.CollectorsEnabled, "connection") {
- c.perfDataCollectorConnection, err = perfdata.NewCollector("DFS Replication Connections", perfdata.InstancesAll, []string{
- bandwidthSavingsUsingDFSReplicationTotal,
- bytesReceivedTotal,
- compressedSizeOfFilesReceivedTotal,
- filesReceivedTotal,
- rdcBytesReceivedTotal,
- rdcCompressedSizeOfFilesReceivedTotal,
- rdcNumberOfFilesReceivedTotal,
- rdcSizeOfFilesReceivedTotal,
- sizeOfFilesReceivedTotal,
- })
+ c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection]("DFS Replication Connections", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
- c.perfDataCollectorFolder, err = perfdata.NewCollector("DFS Replicated Folders", perfdata.InstancesAll, []string{
- bandwidthSavingsUsingDFSReplicationTotal,
- compressedSizeOfFilesReceivedTotal,
- conflictBytesCleanedUpTotal,
- conflictBytesGeneratedTotal,
- conflictFilesCleanedUpTotal,
- conflictFilesGeneratedTotal,
- conflictFolderCleanupsCompletedTotal,
- conflictSpaceInUse,
- deletedSpaceInUse,
- deletedBytesCleanedUpTotal,
- deletedBytesGeneratedTotal,
- deletedFilesCleanedUpTotal,
- deletedFilesGeneratedTotal,
- fileInstallsRetriedTotal,
- fileInstallsSucceededTotal,
- filesReceivedTotal,
- rdcBytesReceivedTotal,
- rdcCompressedSizeOfFilesReceivedTotal,
- rdcNumberOfFilesReceivedTotal,
- rdcSizeOfFilesReceivedTotal,
- sizeOfFilesReceivedTotal,
- stagingSpaceInUse,
- stagingBytesCleanedUpTotal,
- stagingBytesGeneratedTotal,
- stagingFilesCleanedUpTotal,
- stagingFilesGeneratedTotal,
- updatesDroppedTotal,
- })
+ c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder]("DFS Replicated Folders", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
- c.perfDataCollectorVolume, err = perfdata.NewCollector("DFS Replication Service Volumes", perfdata.InstancesAll, []string{
- databaseCommitsTotal,
- databaseLookupsTotal,
- usnJournalRecordsReadTotal,
- usnJournalRecordsAcceptedTotal,
- usnJournalUnreadPercentage,
- })
+ c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume]("DFS Replication Service Volumes", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
}
}
+
// connection
c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
@@ -537,76 +497,74 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectPDHConnection(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorConnection.Collect()
+ err := c.perfDataCollectorConnection.Collect(&c.perfDataObjectConnection)
if err != nil {
return fmt.Errorf("failed to collect DFS Replication Connections metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect DFS Replication Connections metrics: %w", types.ErrNoData)
- }
+ for _, connection := range c.perfDataObjectConnection {
+ name := connection.Name
- for name, connection := range perfData {
ch <- prometheus.MustNewConstMetric(
c.connectionBandwidthSavingsUsingDFSReplicationTotal,
prometheus.CounterValue,
- connection[bandwidthSavingsUsingDFSReplicationTotal].FirstValue,
+ connection.BandwidthSavingsUsingDFSReplicationTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionBytesReceivedTotal,
prometheus.CounterValue,
- connection[bytesReceivedTotal].FirstValue,
+ connection.BytesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
- connection[compressedSizeOfFilesReceivedTotal].FirstValue,
+ connection.CompressedSizeOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionFilesReceivedTotal,
prometheus.CounterValue,
- connection[filesReceivedTotal].FirstValue,
+ connection.FilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionRDCBytesReceivedTotal,
prometheus.CounterValue,
- connection[rdcBytesReceivedTotal].FirstValue,
+ connection.RdcBytesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionRDCCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
- connection[rdcCompressedSizeOfFilesReceivedTotal].FirstValue,
+ connection.RdcCompressedSizeOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionRDCSizeOfFilesReceivedTotal,
prometheus.CounterValue,
- connection[rdcSizeOfFilesReceivedTotal].FirstValue,
+ connection.RdcSizeOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionRDCNumberOfFilesReceivedTotal,
prometheus.CounterValue,
- connection[rdcNumberOfFilesReceivedTotal].FirstValue,
+ connection.RdcNumberOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionSizeOfFilesReceivedTotal,
prometheus.CounterValue,
- connection[sizeOfFilesReceivedTotal].FirstValue,
+ connection.SizeOfFilesReceivedTotal,
name,
)
}
@@ -615,202 +573,200 @@ func (c *Collector) collectPDHConnection(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectPDHFolder(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorFolder.Collect()
+ err := c.perfDataCollectorFolder.Collect(&c.perfDataObjectFolder)
if err != nil {
return fmt.Errorf("failed to collect DFS Replicated Folders metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect DFS Replicated Folders metrics: %w", types.ErrNoData)
- }
+ for _, folder := range c.perfDataObjectFolder {
+ name := folder.Name
- for name, folder := range perfData {
ch <- prometheus.MustNewConstMetric(
c.folderBandwidthSavingsUsingDFSReplicationTotal,
prometheus.CounterValue,
- folder[bandwidthSavingsUsingDFSReplicationTotal].FirstValue,
+ folder.BandwidthSavingsUsingDFSReplicationTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
- folder[compressedSizeOfFilesReceivedTotal].FirstValue,
+ folder.CompressedSizeOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictBytesCleanedUpTotal,
prometheus.CounterValue,
- folder[conflictBytesCleanedUpTotal].FirstValue,
+ folder.ConflictBytesCleanedUpTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictBytesGeneratedTotal,
prometheus.CounterValue,
- folder[conflictBytesGeneratedTotal].FirstValue,
+ folder.ConflictBytesGeneratedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictFilesCleanedUpTotal,
prometheus.CounterValue,
- folder[conflictFilesCleanedUpTotal].FirstValue,
+ folder.ConflictFilesCleanedUpTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictFilesGeneratedTotal,
prometheus.CounterValue,
- folder[conflictFilesGeneratedTotal].FirstValue,
+ folder.ConflictFilesGeneratedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictFolderCleanupsCompletedTotal,
prometheus.CounterValue,
- folder[conflictFolderCleanupsCompletedTotal].FirstValue,
+ folder.ConflictFolderCleanupsCompletedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictSpaceInUse,
prometheus.GaugeValue,
- folder[conflictSpaceInUse].FirstValue,
+ folder.ConflictSpaceInUse,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedSpaceInUse,
prometheus.GaugeValue,
- folder[deletedSpaceInUse].FirstValue,
+ folder.DeletedSpaceInUse,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedBytesCleanedUpTotal,
prometheus.CounterValue,
- folder[deletedBytesCleanedUpTotal].FirstValue,
+ folder.DeletedBytesCleanedUpTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedBytesGeneratedTotal,
prometheus.CounterValue,
- folder[deletedBytesGeneratedTotal].FirstValue,
+ folder.DeletedBytesGeneratedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedFilesCleanedUpTotal,
prometheus.CounterValue,
- folder[deletedFilesCleanedUpTotal].FirstValue,
+ folder.DeletedFilesCleanedUpTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedFilesGeneratedTotal,
prometheus.CounterValue,
- folder[deletedFilesGeneratedTotal].FirstValue,
+ folder.DeletedFilesGeneratedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderFileInstallsRetriedTotal,
prometheus.CounterValue,
- folder[fileInstallsRetriedTotal].FirstValue,
+ folder.FileInstallsRetriedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderFileInstallsSucceededTotal,
prometheus.CounterValue,
- folder[fileInstallsSucceededTotal].FirstValue,
+ folder.FileInstallsSucceededTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderFilesReceivedTotal,
prometheus.CounterValue,
- folder[filesReceivedTotal].FirstValue,
+ folder.FilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderRDCBytesReceivedTotal,
prometheus.CounterValue,
- folder[rdcBytesReceivedTotal].FirstValue,
+ folder.RdcBytesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderRDCCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
- folder[rdcCompressedSizeOfFilesReceivedTotal].FirstValue,
+ folder.RdcCompressedSizeOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderRDCNumberOfFilesReceivedTotal,
prometheus.CounterValue,
- folder[rdcNumberOfFilesReceivedTotal].FirstValue,
+ folder.RdcNumberOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderRDCSizeOfFilesReceivedTotal,
prometheus.CounterValue,
- folder[rdcSizeOfFilesReceivedTotal].FirstValue,
+ folder.RdcSizeOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderSizeOfFilesReceivedTotal,
prometheus.CounterValue,
- folder[sizeOfFilesReceivedTotal].FirstValue,
+ folder.SizeOfFilesReceivedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingSpaceInUse,
prometheus.GaugeValue,
- folder[stagingSpaceInUse].FirstValue,
+ folder.StagingSpaceInUse,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingBytesCleanedUpTotal,
prometheus.CounterValue,
- folder[stagingBytesCleanedUpTotal].FirstValue,
+ folder.StagingBytesCleanedUpTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingBytesGeneratedTotal,
prometheus.CounterValue,
- folder[stagingBytesGeneratedTotal].FirstValue,
+ folder.StagingBytesGeneratedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingFilesCleanedUpTotal,
prometheus.CounterValue,
- folder[stagingFilesCleanedUpTotal].FirstValue,
+ folder.StagingFilesCleanedUpTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingFilesGeneratedTotal,
prometheus.CounterValue,
- folder[stagingFilesGeneratedTotal].FirstValue,
+ folder.StagingFilesGeneratedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.folderUpdatesDroppedTotal,
prometheus.CounterValue,
- folder[updatesDroppedTotal].FirstValue,
+ folder.UpdatesDroppedTotal,
name,
)
}
@@ -819,48 +775,45 @@ func (c *Collector) collectPDHFolder(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectPDHVolume(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorVolume.Collect()
+ err := c.perfDataCollectorVolume.Collect(&c.perfDataObjectVolume)
if err != nil {
return fmt.Errorf("failed to collect DFS Replication Volumes metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect DFS Replication Volumes metrics: %w", types.ErrNoData)
- }
-
- for name, volume := range perfData {
+ for _, volume := range c.perfDataObjectVolume {
+ name := volume.Name
ch <- prometheus.MustNewConstMetric(
c.volumeDatabaseLookupsTotal,
prometheus.CounterValue,
- volume[databaseLookupsTotal].FirstValue,
+ volume.DatabaseLookupsTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.volumeDatabaseCommitsTotal,
prometheus.CounterValue,
- volume[databaseCommitsTotal].FirstValue,
+ volume.DatabaseCommitsTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.volumeUSNJournalRecordsAcceptedTotal,
prometheus.CounterValue,
- volume[usnJournalRecordsAcceptedTotal].FirstValue,
+ volume.UsnJournalRecordsAcceptedTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.volumeUSNJournalRecordsReadTotal,
prometheus.CounterValue,
- volume[usnJournalRecordsReadTotal].FirstValue,
+ volume.UsnJournalRecordsReadTotal,
name,
)
ch <- prometheus.MustNewConstMetric(
c.volumeUSNJournalUnreadPercentage,
prometheus.GaugeValue,
- volume[usnJournalUnreadPercentage].FirstValue,
+ volume.UsnJournalUnreadPercentage,
name,
)
}
diff --git a/internal/collector/dfsr/types.go b/internal/collector/dfsr/types.go
new file mode 100644
index 000000000..5b94d7ddf
--- /dev/null
+++ b/internal/collector/dfsr/types.go
@@ -0,0 +1,75 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package dfsr
+
+// Connection Perflib: "DFS Replication Service Connections".
+type perfDataCounterValuesConnection struct {
+ Name string
+
+ BandwidthSavingsUsingDFSReplicationTotal float64 `perfdata:"Bandwidth Savings Using DFS Replication"`
+ BytesReceivedTotal float64 `perfdata:"Total Bytes Received"`
+ CompressedSizeOfFilesReceivedTotal float64 `perfdata:"Compressed Size of Files Received"`
+ FilesReceivedTotal float64 `perfdata:"Total Files Received"`
+ RdcBytesReceivedTotal float64 `perfdata:"RDC Bytes Received"`
+ RdcCompressedSizeOfFilesReceivedTotal float64 `perfdata:"RDC Compressed Size of Files Received"`
+ RdcNumberOfFilesReceivedTotal float64 `perfdata:"RDC Number of Files Received"`
+ RdcSizeOfFilesReceivedTotal float64 `perfdata:"RDC Size of Files Received"`
+ SizeOfFilesReceivedTotal float64 `perfdata:"Size of Files Received"`
+}
+
+// Folder Perflib: "DFS Replicated Folder".
+type perfDataCounterValuesFolder struct {
+ Name string
+
+ BandwidthSavingsUsingDFSReplicationTotal float64 `perfdata:"Bandwidth Savings Using DFS Replication"`
+ CompressedSizeOfFilesReceivedTotal float64 `perfdata:"Compressed Size of Files Received"`
+ ConflictBytesCleanedUpTotal float64 `perfdata:"Conflict Bytes Cleaned Up"`
+ ConflictBytesGeneratedTotal float64 `perfdata:"Conflict Bytes Generated"`
+ ConflictFilesCleanedUpTotal float64 `perfdata:"Conflict Files Cleaned Up"`
+ ConflictFilesGeneratedTotal float64 `perfdata:"Conflict Files Generated"`
+ ConflictFolderCleanupsCompletedTotal float64 `perfdata:"Conflict folder Cleanups Completed"`
+ ConflictSpaceInUse float64 `perfdata:"Conflict Space In Use"`
+ DeletedSpaceInUse float64 `perfdata:"Deleted Space In Use"`
+ DeletedBytesCleanedUpTotal float64 `perfdata:"Deleted Bytes Cleaned Up"`
+ DeletedBytesGeneratedTotal float64 `perfdata:"Deleted Bytes Generated"`
+ DeletedFilesCleanedUpTotal float64 `perfdata:"Deleted Files Cleaned Up"`
+ DeletedFilesGeneratedTotal float64 `perfdata:"Deleted Files Generated"`
+ FileInstallsRetriedTotal float64 `perfdata:"File Installs Retried"`
+ FileInstallsSucceededTotal float64 `perfdata:"File Installs Succeeded"`
+ FilesReceivedTotal float64 `perfdata:"Total Files Received"`
+ RdcBytesReceivedTotal float64 `perfdata:"RDC Bytes Received"`
+ RdcCompressedSizeOfFilesReceivedTotal float64 `perfdata:"RDC Compressed Size of Files Received"`
+ RdcNumberOfFilesReceivedTotal float64 `perfdata:"RDC Number of Files Received"`
+ RdcSizeOfFilesReceivedTotal float64 `perfdata:"RDC Size of Files Received"`
+ SizeOfFilesReceivedTotal float64 `perfdata:"Size of Files Received"`
+ StagingSpaceInUse float64 `perfdata:"Staging Space In Use"`
+ StagingBytesCleanedUpTotal float64 `perfdata:"Staging Bytes Cleaned Up"`
+ StagingBytesGeneratedTotal float64 `perfdata:"Staging Bytes Generated"`
+ StagingFilesCleanedUpTotal float64 `perfdata:"Staging Files Cleaned Up"`
+ StagingFilesGeneratedTotal float64 `perfdata:"Staging Files Generated"`
+ UpdatesDroppedTotal float64 `perfdata:"Updates Dropped"`
+}
+
+// Volume Perflib: "DFS Replication Service Volumes".
+type perfDataCounterValuesVolume struct {
+ Name string
+
+ DatabaseCommitsTotal float64 `perfdata:"Database Commits"`
+ DatabaseLookupsTotal float64 `perfdata:"Database Lookups"`
+ UsnJournalRecordsReadTotal float64 `perfdata:"USN Journal Records Read"`
+ UsnJournalRecordsAcceptedTotal float64 `perfdata:"USN Journal Records Accepted"`
+ UsnJournalUnreadPercentage float64 `perfdata:"USN Journal Unread Percentage"`
+}
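
Across the dfsr sub-collectors the consumer side changes the same way: instead of ranging over a `map[instance]counters`, `Collect(&slice)` fills a slice of tagged structs, and the untagged `Name` field carries the instance label. A runnable sketch of that shape with made-up data follows; the `volumeCounters` type and values are illustrative only.

```go
// Sketch of the consumer-side pattern: a slice of structs whose Name field
// replaces the old map key. Data below is invented for illustration.
package main

import "fmt"

type volumeCounters struct {
	Name string // instance name, filled by the collector

	DatabaseCommitsTotal float64 `perfdata:"Database Commits"`
	DatabaseLookupsTotal float64 `perfdata:"Database Lookups"`
}

func main() {
	// What a pdh.Collector.Collect(&volumes) call would conceptually leave behind.
	volumes := []volumeCounters{
		{Name: "C:", DatabaseCommitsTotal: 12, DatabaseLookupsTotal: 340},
		{Name: "D:", DatabaseCommitsTotal: 7, DatabaseLookupsTotal: 98},
	}

	for _, volume := range volumes {
		name := volume.Name // replaces the old map key
		fmt.Printf("dfsr_volume_database_commits_total{volume=%q} %v\n",
			name, volume.DatabaseCommitsTotal)
	}
}
```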
diff --git a/internal/collector/dhcp/const.go b/internal/collector/dhcp/const.go
deleted file mode 100644
index 544c2d3e8..000000000
--- a/internal/collector/dhcp/const.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package dhcp
-
-const (
- acksTotal = "Acks/sec"
- activeQueueLength = "Active Queue Length"
- conflictCheckQueueLength = "Conflict Check Queue Length"
- declinesTotal = "Declines/sec"
- deniedDueToMatch = "Denied due to match."
- deniedDueToNonMatch = "Denied due to match."
- discoversTotal = "Discovers/sec"
- duplicatesDroppedTotal = "Duplicates Dropped/sec"
- failoverBndAckReceivedTotal = "Failover: BndAck received/sec."
- failoverBndAckSentTotal = "Failover: BndAck sent/sec."
- failoverBndUpdDropped = "Failover: BndUpd Dropped."
- failoverBndUpdPendingOutboundQueue = "Failover: BndUpd pending in outbound queue."
- failoverBndUpdReceivedTotal = "Failover: BndUpd received/sec."
- failoverBndUpdSentTotal = "Failover: BndUpd sent/sec."
- failoverTransitionsCommunicationInterruptedState = "Failover: Transitions to COMMUNICATION-INTERRUPTED state."
- failoverTransitionsPartnerDownState = "Failover: Transitions to PARTNER-DOWN state."
- failoverTransitionsRecoverState = "Failover: Transitions to RECOVER state."
- informsTotal = "Informs/sec"
- nacksTotal = "Nacks/sec"
- offerQueueLength = "Offer Queue Length"
- offersTotal = "Offers/sec"
- packetsExpiredTotal = "Packets Expired/sec"
- packetsReceivedTotal = "Packets Received/sec"
- releasesTotal = "Releases/sec"
- requestsTotal = "Requests/sec"
-)
diff --git a/internal/collector/dhcp/dhcp.go b/internal/collector/dhcp/dhcp.go
index 0d67d7007..357a64c3f 100644
--- a/internal/collector/dhcp/dhcp.go
+++ b/internal/collector/dhcp/dhcp.go
@@ -21,7 +21,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -37,7 +37,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
acksTotal *prometheus.Desc
activeQueueLength *prometheus.Desc
@@ -95,33 +96,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("DHCP Server", nil, []string{
- acksTotal,
- activeQueueLength,
- conflictCheckQueueLength,
- declinesTotal,
- deniedDueToMatch,
- deniedDueToNonMatch,
- discoversTotal,
- duplicatesDroppedTotal,
- failoverBndAckReceivedTotal,
- failoverBndAckSentTotal,
- failoverBndUpdDropped,
- failoverBndUpdPendingOutboundQueue,
- failoverBndUpdReceivedTotal,
- failoverBndUpdSentTotal,
- failoverTransitionsCommunicationInterruptedState,
- failoverTransitionsPartnerDownState,
- failoverTransitionsRecoverState,
- informsTotal,
- nacksTotal,
- offerQueueLength,
- offersTotal,
- packetsExpiredTotal,
- packetsReceivedTotal,
- releasesTotal,
- requestsTotal,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("DHCP Server", nil)
if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err)
}
@@ -281,164 +256,159 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DHCP Server metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("failed to collect DHCP Server metrics: %w", types.ErrNoData)
- }
-
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedTotal,
prometheus.CounterValue,
- data[packetsReceivedTotal].FirstValue,
+ c.perfDataObject[0].PacketsReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.duplicatesDroppedTotal,
prometheus.CounterValue,
- data[duplicatesDroppedTotal].FirstValue,
+ c.perfDataObject[0].DuplicatesDroppedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.packetsExpiredTotal,
prometheus.CounterValue,
- data[packetsExpiredTotal].FirstValue,
+ c.perfDataObject[0].PacketsExpiredTotal,
)
ch <- prometheus.MustNewConstMetric(
c.activeQueueLength,
prometheus.GaugeValue,
- data[activeQueueLength].FirstValue,
+ c.perfDataObject[0].ActiveQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.conflictCheckQueueLength,
prometheus.GaugeValue,
- data[conflictCheckQueueLength].FirstValue,
+ c.perfDataObject[0].ConflictCheckQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.discoversTotal,
prometheus.CounterValue,
- data[discoversTotal].FirstValue,
+ c.perfDataObject[0].DiscoversTotal,
)
ch <- prometheus.MustNewConstMetric(
c.offersTotal,
prometheus.CounterValue,
- data[offersTotal].FirstValue,
+ c.perfDataObject[0].OffersTotal,
)
ch <- prometheus.MustNewConstMetric(
c.requestsTotal,
prometheus.CounterValue,
- data[requestsTotal].FirstValue,
+ c.perfDataObject[0].RequestsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.informsTotal,
prometheus.CounterValue,
- data[informsTotal].FirstValue,
+ c.perfDataObject[0].InformsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.acksTotal,
prometheus.CounterValue,
- data[acksTotal].FirstValue,
+ c.perfDataObject[0].AcksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.nACKsTotal,
prometheus.CounterValue,
- data[nacksTotal].FirstValue,
+ c.perfDataObject[0].NacksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.declinesTotal,
prometheus.CounterValue,
- data[declinesTotal].FirstValue,
+ c.perfDataObject[0].DeclinesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.releasesTotal,
prometheus.CounterValue,
- data[releasesTotal].FirstValue,
+ c.perfDataObject[0].ReleasesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.offerQueueLength,
prometheus.GaugeValue,
- data[offerQueueLength].FirstValue,
+ c.perfDataObject[0].OfferQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.deniedDueToMatch,
prometheus.CounterValue,
- data[deniedDueToMatch].FirstValue,
+ c.perfDataObject[0].DeniedDueToMatch,
)
ch <- prometheus.MustNewConstMetric(
c.deniedDueToNonMatch,
prometheus.CounterValue,
- data[deniedDueToNonMatch].FirstValue,
+ c.perfDataObject[0].DeniedDueToNonMatch,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdSentTotal,
prometheus.CounterValue,
- data[failoverBndUpdSentTotal].FirstValue,
+ c.perfDataObject[0].FailoverBndUpdSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdReceivedTotal,
prometheus.CounterValue,
- data[failoverBndUpdReceivedTotal].FirstValue,
+ c.perfDataObject[0].FailoverBndUpdReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndAckSentTotal,
prometheus.CounterValue,
- data[failoverBndAckSentTotal].FirstValue,
+ c.perfDataObject[0].FailoverBndAckSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndAckReceivedTotal,
prometheus.CounterValue,
- data[failoverBndAckReceivedTotal].FirstValue,
+ c.perfDataObject[0].FailoverBndAckReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdPendingOutboundQueue,
prometheus.GaugeValue,
- data[failoverBndUpdPendingOutboundQueue].FirstValue,
+ c.perfDataObject[0].FailoverBndUpdPendingOutboundQueue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsCommunicationInterruptedState,
prometheus.CounterValue,
- data[failoverTransitionsCommunicationInterruptedState].FirstValue,
+ c.perfDataObject[0].FailoverTransitionsCommunicationInterruptedState,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsPartnerDownState,
prometheus.CounterValue,
- data[failoverTransitionsPartnerDownState].FirstValue,
+ c.perfDataObject[0].FailoverTransitionsPartnerDownState,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsRecoverState,
prometheus.CounterValue,
- data[failoverTransitionsRecoverState].FirstValue,
+ c.perfDataObject[0].FailoverTransitionsRecoverState,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdDropped,
prometheus.CounterValue,
- data[failoverBndUpdDropped].FirstValue,
+ c.perfDataObject[0].FailoverBndUpdDropped,
)
return nil
diff --git a/internal/collector/dhcp/types.go b/internal/collector/dhcp/types.go
new file mode 100644
index 000000000..c6974ce42
--- /dev/null
+++ b/internal/collector/dhcp/types.go
@@ -0,0 +1,44 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package dhcp
+
+type perfDataCounterValues struct {
+ AcksTotal float64 `perfdata:"Acks/sec"`
+ ActiveQueueLength float64 `perfdata:"Active Queue Length"`
+ ConflictCheckQueueLength float64 `perfdata:"Conflict Check Queue Length"`
+ DeclinesTotal float64 `perfdata:"Declines/sec"`
+ DeniedDueToMatch float64 `perfdata:"Denied due to match."`
+ DeniedDueToNonMatch float64 `perfdata:"Denied due to nonmatch."` // assumed counter name; distinct from "Denied due to match." above
+ DiscoversTotal float64 `perfdata:"Discovers/sec"`
+ DuplicatesDroppedTotal float64 `perfdata:"Duplicates Dropped/sec"`
+ FailoverBndAckReceivedTotal float64 `perfdata:"Failover: BndAck received/sec."`
+ FailoverBndAckSentTotal float64 `perfdata:"Failover: BndAck sent/sec."`
+ FailoverBndUpdDropped float64 `perfdata:"Failover: BndUpd Dropped."`
+ FailoverBndUpdPendingOutboundQueue float64 `perfdata:"Failover: BndUpd pending in outbound queue."`
+ FailoverBndUpdReceivedTotal float64 `perfdata:"Failover: BndUpd received/sec."`
+ FailoverBndUpdSentTotal float64 `perfdata:"Failover: BndUpd sent/sec."`
+ FailoverTransitionsCommunicationInterruptedState float64 `perfdata:"Failover: Transitions to COMMUNICATION-INTERRUPTED state."`
+ FailoverTransitionsPartnerDownState float64 `perfdata:"Failover: Transitions to PARTNER-DOWN state."`
+ FailoverTransitionsRecoverState float64 `perfdata:"Failover: Transitions to RECOVER state."`
+ InformsTotal float64 `perfdata:"Informs/sec"`
+ NacksTotal float64 `perfdata:"Nacks/sec"`
+ OfferQueueLength float64 `perfdata:"Offer Queue Length"`
+ OffersTotal float64 `perfdata:"Offers/sec"`
+ PacketsExpiredTotal float64 `perfdata:"Packets Expired/sec"`
+ PacketsReceivedTotal float64 `perfdata:"Packets Received/sec"`
+ ReleasesTotal float64 `perfdata:"Releases/sec"`
+ RequestsTotal float64 `perfdata:"Requests/sec"`
+}
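
DHCP Server is a single-instance object, so the collector is built with a `nil` instance list and every metric reads from `c.perfDataObject[0]`. The explicit `types.ErrNoData` guard from the old code is gone, presumably because the `pdh` collector reports an empty result itself; if it does not, indexing `[0]` would panic. A defensive variant could look like the generic sketch below (the `firstInstance` helper is hypothetical, not part of the exporter).

```go
// Sketch: a tiny generic guard for single-instance collectors, restoring the
// old "no data" error path. firstInstance is hypothetical.
package main

import (
	"errors"
	"fmt"
)

var errNoData = errors.New("no data") // stand-in for types.ErrNoData

// firstInstance returns the only element of a single-instance result set,
// or an error when the collector produced nothing.
func firstInstance[T any](values []T) (T, error) {
	var zero T
	if len(values) == 0 {
		return zero, errNoData
	}

	return values[0], nil
}

type dhcpCounters struct {
	PacketsReceivedTotal float64 `perfdata:"Packets Received/sec"`
}

func main() {
	data, err := firstInstance([]dhcpCounters{{PacketsReceivedTotal: 42}})
	if err != nil {
		fmt.Println("collect failed:", err)
		return
	}

	fmt.Println(data.PacketsReceivedTotal)
}
```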
diff --git a/internal/collector/dns/const.go b/internal/collector/dns/const.go
deleted file mode 100644
index 6368df25e..000000000
--- a/internal/collector/dns/const.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package dns
-
-const (
- _ = "% User Time"
- _ = "176"
- _ = "Async Fast Reads/sec"
- axfrRequestReceived = "AXFR Request Received"
- axfrRequestSent = "AXFR Request Sent"
- axfrResponseReceived = "AXFR Response Received"
- axfrSuccessReceived = "AXFR Success Received"
- axfrSuccessSent = "AXFR Success Sent"
- cachingMemory = "Caching Memory"
- _ = "Data Flush Pages/sec"
- _ = "Data Flushes/sec"
- databaseNodeMemory = "Database Node Memory"
- dynamicUpdateNoOperation = "Dynamic Update NoOperation"
- _ = "Dynamic Update NoOperation/sec"
- dynamicUpdateQueued = "Dynamic Update Queued"
- _ = "Dynamic Update Received"
- _ = "Dynamic Update Received/sec"
- dynamicUpdateRejected = "Dynamic Update Rejected"
- dynamicUpdateTimeOuts = "Dynamic Update TimeOuts"
- dynamicUpdateWrittenToDatabase = "Dynamic Update Written to Database"
- _ = "Dynamic Update Written to Database/sec"
- _ = "Enumerations Server/sec"
- _ = "Fast Read Not Possibles/sec"
- _ = "Fast Read Resource Misses/sec"
- ixfrRequestReceived = "IXFR Request Received"
- ixfrRequestSent = "IXFR Request Sent"
- ixfrResponseReceived = "IXFR Response Received"
- _ = "IXFR Success Received"
- ixfrSuccessSent = "IXFR Success Sent"
- ixfrTCPSuccessReceived = "IXFR TCP Success Received"
- ixfrUDPSuccessReceived = "IXFR UDP Success Received"
- _ = "Lazy Write Flushes/sec"
- _ = "Lazy Write Pages/sec"
- _ = "Level 2 TLB Fills/sec"
- nbStatMemory = "Nbstat Memory"
- notifyReceived = "Notify Received"
- notifySent = "Notify Sent"
- _ = "Query Dropped Bad Socket"
- _ = "Query Dropped Bad Socket/sec"
- _ = "Query Dropped By Policy"
- _ = "Query Dropped By Policy/sec"
- _ = "Query Dropped By Response Rate Limiting"
- _ = "Query Dropped By Response Rate Limiting/sec"
- _ = "Query Dropped Send"
- _ = "Query Dropped Send/sec"
- _ = "Query Dropped Total"
- _ = "Query Dropped Total/sec"
- recordFlowMemory = "Record Flow Memory"
- recursiveQueries = "Recursive Queries"
- _ = "Recursive Queries/sec"
- recursiveQueryFailure = "Recursive Query Failure"
- _ = "Recursive Query Failure/sec"
- _ = "Recursive Send TimeOuts"
- recursiveSendTimeOuts = "Recursive TimeOut/sec"
- _ = "Responses Suppressed"
- _ = "Responses Suppressed/sec"
- secureUpdateFailure = "Secure Update Failure"
- secureUpdateReceived = "Secure Update Received"
- _ = "Secure Update Received/sec"
- tcpMessageMemory = "TCP Message Memory"
- tcpQueryReceived = "TCP Query Received"
- _ = "TCP Query Received/sec"
- tcpResponseSent = "TCP Response Sent"
- _ = "TCP Response Sent/sec"
- _ = "Total Query Received"
- _ = "Total Query Received/sec"
- _ = "Total Remote Inflight Queries"
- _ = "Total Response Sent"
- _ = "Total Response Sent/sec"
- udpMessageMemory = "UDP Message Memory"
- udpQueryReceived = "UDP Query Received"
- _ = "UDP Query Received/sec"
- udpResponseSent = "UDP Response Sent"
- _ = "UDP Response Sent/sec"
- unmatchedResponsesReceived = "Unmatched Responses Received"
- _ = "Virtual Bytes"
- winsLookupReceived = "WINS Lookup Received"
- _ = "WINS Lookup Received/sec"
- winsResponseSent = "WINS Response Sent"
- _ = "WINS Response Sent/sec"
- winsReverseLookupReceived = "WINS Reverse Lookup Received"
- _ = "WINS Reverse Lookup Received/sec"
- winsReverseResponseSent = "WINS Reverse Response Sent"
- _ = "WINS Reverse Response Sent/sec"
- zoneTransferFailure = "Zone Transfer Failure"
- zoneTransferSOARequestSent = "Zone Transfer Request Received"
- _ = "Zone Transfer SOA Request Sent"
- _ = "Zone Transfer Success"
-)
diff --git a/internal/collector/dns/dns.go b/internal/collector/dns/dns.go
index 3e5dee7d5..17c651f84 100644
--- a/internal/collector/dns/dns.go
+++ b/internal/collector/dns/dns.go
@@ -21,7 +21,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -37,7 +37,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
dynamicUpdatesFailures *prometheus.Desc
dynamicUpdatesQueued *prometheus.Desc
@@ -92,48 +93,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("DNS", perfdata.InstancesAll, []string{
- axfrRequestReceived,
- axfrRequestSent,
- axfrResponseReceived,
- axfrSuccessReceived,
- axfrSuccessSent,
- cachingMemory,
- databaseNodeMemory,
- dynamicUpdateNoOperation,
- dynamicUpdateQueued,
- dynamicUpdateRejected,
- dynamicUpdateTimeOuts,
- dynamicUpdateWrittenToDatabase,
- ixfrRequestReceived,
- ixfrRequestSent,
- ixfrResponseReceived,
- ixfrSuccessSent,
- ixfrTCPSuccessReceived,
- ixfrUDPSuccessReceived,
- nbStatMemory,
- notifyReceived,
- notifySent,
- recordFlowMemory,
- recursiveQueries,
- recursiveQueryFailure,
- recursiveSendTimeOuts,
- secureUpdateFailure,
- secureUpdateReceived,
- tcpMessageMemory,
- tcpQueryReceived,
- tcpResponseSent,
- udpMessageMemory,
- udpQueryReceived,
- udpResponseSent,
- unmatchedResponsesReceived,
- winsLookupReceived,
- winsResponseSent,
- winsReverseLookupReceived,
- winsReverseResponseSent,
- zoneTransferFailure,
- zoneTransferSOARequestSent,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("DNS", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DNS collector: %w", err)
}
@@ -277,79 +237,74 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DNS metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("failed to collect DNS metrics: %w", types.ErrNoData)
- }
-
ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsReceived,
prometheus.CounterValue,
- data[axfrRequestReceived].FirstValue,
+ c.perfDataObject[0].AxfrRequestReceived,
"full",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsReceived,
prometheus.CounterValue,
- data[ixfrRequestReceived].FirstValue,
+ c.perfDataObject[0].IxfrRequestReceived,
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsSent,
prometheus.CounterValue,
- data[axfrRequestSent].FirstValue,
+ c.perfDataObject[0].AxfrRequestSent,
"full",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsSent,
prometheus.CounterValue,
- data[ixfrRequestSent].FirstValue,
+ c.perfDataObject[0].IxfrRequestSent,
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsSent,
prometheus.CounterValue,
- data[zoneTransferSOARequestSent].FirstValue,
+ c.perfDataObject[0].ZoneTransferSOARequestSent,
"soa",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferResponsesReceived,
prometheus.CounterValue,
- data[axfrResponseReceived].FirstValue,
+ c.perfDataObject[0].AxfrResponseReceived,
"full",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferResponsesReceived,
prometheus.CounterValue,
- data[ixfrResponseReceived].FirstValue,
+ c.perfDataObject[0].IxfrResponseReceived,
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessReceived,
prometheus.CounterValue,
- data[axfrSuccessReceived].FirstValue,
+ c.perfDataObject[0].AxfrSuccessReceived,
"full",
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessReceived,
prometheus.CounterValue,
- data[ixfrTCPSuccessReceived].FirstValue,
+ c.perfDataObject[0].IxfrTCPSuccessReceived,
"incremental",
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessReceived,
prometheus.CounterValue,
- data[ixfrTCPSuccessReceived].FirstValue,
+ c.perfDataObject[0].IxfrUDPSuccessReceived, // UDP counter for the "udp" label

"incremental",
"udp",
)
@@ -357,183 +312,183 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessSent,
prometheus.CounterValue,
- data[axfrSuccessSent].FirstValue,
+ c.perfDataObject[0].AxfrSuccessSent,
"full",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessSent,
prometheus.CounterValue,
- data[ixfrSuccessSent].FirstValue,
+ c.perfDataObject[0].IxfrSuccessSent,
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferFailures,
prometheus.CounterValue,
- data[zoneTransferFailure].FirstValue,
+ c.perfDataObject[0].ZoneTransferFailure,
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
- data[cachingMemory].FirstValue,
+ c.perfDataObject[0].CachingMemory,
"caching",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
- data[databaseNodeMemory].FirstValue,
+ c.perfDataObject[0].DatabaseNodeMemory,
"database_node",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
- data[nbStatMemory].FirstValue,
+ c.perfDataObject[0].NbStatMemory,
"nbstat",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
- data[recordFlowMemory].FirstValue,
+ c.perfDataObject[0].RecordFlowMemory,
"record_flow",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
- data[tcpMessageMemory].FirstValue,
+ c.perfDataObject[0].TcpMessageMemory,
"tcp_message",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
- data[udpMessageMemory].FirstValue,
+ c.perfDataObject[0].UdpMessageMemory,
"udp_message",
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesReceived,
prometheus.CounterValue,
- data[dynamicUpdateNoOperation].FirstValue,
+ c.perfDataObject[0].DynamicUpdateNoOperation,
"noop",
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesReceived,
prometheus.CounterValue,
- data[dynamicUpdateWrittenToDatabase].FirstValue,
+ c.perfDataObject[0].DynamicUpdateWrittenToDatabase,
"written",
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesQueued,
prometheus.GaugeValue,
- data[dynamicUpdateQueued].FirstValue,
+ c.perfDataObject[0].DynamicUpdateQueued,
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesFailures,
prometheus.CounterValue,
- data[dynamicUpdateRejected].FirstValue,
+ c.perfDataObject[0].DynamicUpdateRejected,
"rejected",
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesFailures,
prometheus.CounterValue,
- data[dynamicUpdateTimeOuts].FirstValue,
+ c.perfDataObject[0].DynamicUpdateTimeOuts,
"timeout",
)
ch <- prometheus.MustNewConstMetric(
c.notifyReceived,
prometheus.CounterValue,
- data[notifyReceived].FirstValue,
+ c.perfDataObject[0].NotifyReceived,
)
ch <- prometheus.MustNewConstMetric(
c.notifySent,
prometheus.CounterValue,
- data[notifySent].FirstValue,
+ c.perfDataObject[0].NotifySent,
)
ch <- prometheus.MustNewConstMetric(
c.recursiveQueries,
prometheus.CounterValue,
- data[recursiveQueries].FirstValue,
+ c.perfDataObject[0].RecursiveQueries,
)
ch <- prometheus.MustNewConstMetric(
c.recursiveQueryFailures,
prometheus.CounterValue,
- data[recursiveQueryFailure].FirstValue,
+ c.perfDataObject[0].RecursiveQueryFailure,
)
ch <- prometheus.MustNewConstMetric(
c.recursiveQuerySendTimeouts,
prometheus.CounterValue,
- data[recursiveSendTimeOuts].FirstValue,
+ c.perfDataObject[0].RecursiveSendTimeOuts,
)
ch <- prometheus.MustNewConstMetric(
c.queries,
prometheus.CounterValue,
- data[tcpQueryReceived].FirstValue,
+ c.perfDataObject[0].TcpQueryReceived,
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.queries,
prometheus.CounterValue,
- data[udpQueryReceived].FirstValue,
+ c.perfDataObject[0].UdpQueryReceived,
"udp",
)
ch <- prometheus.MustNewConstMetric(
c.responses,
prometheus.CounterValue,
- data[tcpResponseSent].FirstValue,
+ c.perfDataObject[0].TcpResponseSent,
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.responses,
prometheus.CounterValue,
- data[udpResponseSent].FirstValue,
+ c.perfDataObject[0].UdpResponseSent,
"udp",
)
ch <- prometheus.MustNewConstMetric(
c.unmatchedResponsesReceived,
prometheus.CounterValue,
- data[unmatchedResponsesReceived].FirstValue,
+ c.perfDataObject[0].UnmatchedResponsesReceived,
)
ch <- prometheus.MustNewConstMetric(
c.winsQueries,
prometheus.CounterValue,
- data[winsLookupReceived].FirstValue,
+ c.perfDataObject[0].WinsLookupReceived,
"forward",
)
ch <- prometheus.MustNewConstMetric(
c.winsQueries,
prometheus.CounterValue,
- data[winsReverseLookupReceived].FirstValue,
+ c.perfDataObject[0].WinsReverseLookupReceived,
"reverse",
)
ch <- prometheus.MustNewConstMetric(
c.winsResponses,
prometheus.CounterValue,
- data[winsResponseSent].FirstValue,
+ c.perfDataObject[0].WinsResponseSent,
"forward",
)
ch <- prometheus.MustNewConstMetric(
c.winsResponses,
prometheus.CounterValue,
- data[winsReverseResponseSent].FirstValue,
+ c.perfDataObject[0].WinsReverseResponseSent,
"reverse",
)
ch <- prometheus.MustNewConstMetric(
c.secureUpdateFailures,
prometheus.CounterValue,
- data[secureUpdateFailure].FirstValue,
+ c.perfDataObject[0].SecureUpdateFailure,
)
ch <- prometheus.MustNewConstMetric(
c.secureUpdateReceived,
prometheus.CounterValue,
- data[secureUpdateReceived].FirstValue,
+ c.perfDataObject[0].SecureUpdateReceived,
)
return nil
diff --git a/internal/collector/dns/types.go b/internal/collector/dns/types.go
new file mode 100644
index 000000000..229533676
--- /dev/null
+++ b/internal/collector/dns/types.go
@@ -0,0 +1,107 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package dns
+
+type perfDataCounterValues struct {
+ _ float64 `perfdata:"% User Time"`
+ _ float64 `perfdata:"176"`
+ _ float64 `perfdata:"Async Fast Reads/sec"`
+ AxfrRequestReceived float64 `perfdata:"AXFR Request Received"`
+ AxfrRequestSent float64 `perfdata:"AXFR Request Sent"`
+ AxfrResponseReceived float64 `perfdata:"AXFR Response Received"`
+ AxfrSuccessReceived float64 `perfdata:"AXFR Success Received"`
+ AxfrSuccessSent float64 `perfdata:"AXFR Success Sent"`
+ CachingMemory float64 `perfdata:"Caching Memory"`
+ _ float64 `perfdata:"Data Flush Pages/sec"`
+ _ float64 `perfdata:"Data Flushes/sec"`
+ DatabaseNodeMemory float64 `perfdata:"Database Node Memory"`
+ DynamicUpdateNoOperation float64 `perfdata:"Dynamic Update NoOperation"`
+ _ float64 `perfdata:"Dynamic Update NoOperation/sec"`
+ DynamicUpdateQueued float64 `perfdata:"Dynamic Update Queued"`
+ _ float64 `perfdata:"Dynamic Update Received"`
+ _ float64 `perfdata:"Dynamic Update Received/sec"`
+ DynamicUpdateRejected float64 `perfdata:"Dynamic Update Rejected"`
+ DynamicUpdateTimeOuts float64 `perfdata:"Dynamic Update TimeOuts"`
+ DynamicUpdateWrittenToDatabase float64 `perfdata:"Dynamic Update Written to Database"`
+ _ float64 `perfdata:"Dynamic Update Written to Database/sec"`
+ _ float64 `perfdata:"Enumerations Server/sec"`
+ _ float64 `perfdata:"Fast Read Not Possibles/sec"`
+ _ float64 `perfdata:"Fast Read Resource Misses/sec"`
+ IxfrRequestReceived float64 `perfdata:"IXFR Request Received"`
+ IxfrRequestSent float64 `perfdata:"IXFR Request Sent"`
+ IxfrResponseReceived float64 `perfdata:"IXFR Response Received"`
+ _ float64 `perfdata:"IXFR Success Received"`
+ IxfrSuccessSent float64 `perfdata:"IXFR Success Sent"`
+ IxfrTCPSuccessReceived float64 `perfdata:"IXFR TCP Success Received"`
+ IxfrUDPSuccessReceived float64 `perfdata:"IXFR UDP Success Received"`
+ _ float64 `perfdata:"Lazy Write Flushes/sec"`
+ _ float64 `perfdata:"Lazy Write Pages/sec"`
+ _ float64 `perfdata:"Level 2 TLB Fills/sec"`
+ NbStatMemory float64 `perfdata:"Nbstat Memory"`
+ NotifyReceived float64 `perfdata:"Notify Received"`
+ NotifySent float64 `perfdata:"Notify Sent"`
+ _ float64 `perfdata:"Query Dropped Bad Socket"`
+ _ float64 `perfdata:"Query Dropped Bad Socket/sec"`
+ _ float64 `perfdata:"Query Dropped By Policy"`
+ _ float64 `perfdata:"Query Dropped By Policy/sec"`
+ _ float64 `perfdata:"Query Dropped By Response Rate Limiting"`
+ _ float64 `perfdata:"Query Dropped By Response Rate Limiting/sec"`
+ _ float64 `perfdata:"Query Dropped Send"`
+ _ float64 `perfdata:"Query Dropped Send/sec"`
+ _ float64 `perfdata:"Query Dropped Total"`
+ _ float64 `perfdata:"Query Dropped Total/sec"`
+ RecordFlowMemory float64 `perfdata:"Record Flow Memory"`
+ RecursiveQueries float64 `perfdata:"Recursive Queries"`
+ _ float64 `perfdata:"Recursive Queries/sec"`
+ RecursiveQueryFailure float64 `perfdata:"Recursive Query Failure"`
+ _ float64 `perfdata:"Recursive Query Failure/sec"`
+ _ float64 `perfdata:"Recursive Send TimeOuts"`
+ RecursiveSendTimeOuts float64 `perfdata:"Recursive TimeOut/sec"`
+ _ float64 `perfdata:"Responses Suppressed"`
+ _ float64 `perfdata:"Responses Suppressed/sec"`
+ SecureUpdateFailure float64 `perfdata:"Secure Update Failure"`
+ SecureUpdateReceived float64 `perfdata:"Secure Update Received"`
+ _ float64 `perfdata:"Secure Update Received/sec"`
+ TcpMessageMemory float64 `perfdata:"TCP Message Memory"`
+ TcpQueryReceived float64 `perfdata:"TCP Query Received"`
+ _ float64 `perfdata:"TCP Query Received/sec"`
+ TcpResponseSent float64 `perfdata:"TCP Response Sent"`
+ _ float64 `perfdata:"TCP Response Sent/sec"`
+ _ float64 `perfdata:"Total Query Received"`
+ _ float64 `perfdata:"Total Query Received/sec"`
+ _ float64 `perfdata:"Total Remote Inflight Queries"`
+ _ float64 `perfdata:"Total Response Sent"`
+ _ float64 `perfdata:"Total Response Sent/sec"`
+ UdpMessageMemory float64 `perfdata:"UDP Message Memory"`
+ UdpQueryReceived float64 `perfdata:"UDP Query Received"`
+ _ float64 `perfdata:"UDP Query Received/sec"`
+ UdpResponseSent float64 `perfdata:"UDP Response Sent"`
+ _ float64 `perfdata:"UDP Response Sent/sec"`
+ UnmatchedResponsesReceived float64 `perfdata:"Unmatched Responses Received"`
+ _ float64 `perfdata:"Virtual Bytes"`
+ WinsLookupReceived float64 `perfdata:"WINS Lookup Received"`
+ _ float64 `perfdata:"WINS Lookup Received/sec"`
+ WinsResponseSent float64 `perfdata:"WINS Response Sent"`
+ _ float64 `perfdata:"WINS Response Sent/sec"`
+ WinsReverseLookupReceived float64 `perfdata:"WINS Reverse Lookup Received"`
+ _ float64 `perfdata:"WINS Reverse Lookup Received/sec"`
+ WinsReverseResponseSent float64 `perfdata:"WINS Reverse Response Sent"`
+ _ float64 `perfdata:"WINS Reverse Response Sent/sec"`
+ ZoneTransferFailure float64 `perfdata:"Zone Transfer Failure"`
+ ZoneTransferSOARequestSent float64 `perfdata:"Zone Transfer SOA Request Sent"` // assumed: tag matched to the field name
+ _ float64 `perfdata:"Zone Transfer Request Received"`
+ _ float64 `perfdata:"Zone Transfer Success"`
+}
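
The DNS struct keeps the object's full counter list documented in one place by declaring counters that are not exported as blank (`_`) fields. A tag mapper can recognise those via reflection, as in the illustrative snippet below; whether the real `pdh` mapper skips them exactly this way is an assumption.

```go
// Illustrative only: blank (_) fields keep a counter listed but opt it out of
// collection; a reflection-based mapper can detect them by name.
package main

import (
	"fmt"
	"reflect"
)

type dnsCounters struct {
	RecursiveQueries float64 `perfdata:"Recursive Queries"`
	_                float64 `perfdata:"Recursive Queries/sec"` // listed but not collected
}

func main() {
	t := reflect.TypeOf(dnsCounters{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		skipped := f.Name == "_" // blank fields cannot be addressed or set
		fmt.Printf("%-30s skipped=%v\n", f.Tag.Get("perfdata"), skipped)
	}
}
```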
diff --git a/internal/collector/exchange/exchange.go b/internal/collector/exchange/exchange.go
index a28d7581a..13f2465bf 100644
--- a/internal/collector/exchange/exchange.go
+++ b/internal/collector/exchange/exchange.go
@@ -21,26 +21,26 @@ import (
"log/slog"
"os"
"strings"
+ "sync"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus/client_golang/prometheus"
)
const Name = "exchange"
const (
- adAccessProcesses = "ADAccessProcesses"
- transportQueues = "TransportQueues"
- httpProxy = "HttpProxy"
- activeSync = "ActiveSync"
- availabilityService = "AvailabilityService"
- outlookWebAccess = "OutlookWebAccess"
- autoDiscover = "Autodiscover"
- workloadManagement = "WorkloadManagement"
- rpcClientAccess = "RpcClientAccess"
- mapiHttpEmsmdb = "MapiHttpEmsmdb"
+ subCollectorADAccessProcesses = "ADAccessProcesses"
+ subCollectorTransportQueues = "TransportQueues"
+ subCollectorHttpProxy = "HttpProxy"
+ subCollectorActiveSync = "ActiveSync"
+ subCollectorAvailabilityService = "AvailabilityService"
+ subCollectorOutlookWebAccess = "OutlookWebAccess"
+ subCollectorAutoDiscover = "Autodiscover"
+ subCollectorWorkloadManagement = "WorkloadManagement"
+ subCollectorRpcClientAccess = "RpcClientAccess"
+ subCollectorMapiHttpEmsmdb = "MapiHttpEmsmdb"
)
type Config struct {
@@ -50,82 +50,35 @@ type Config struct {
//nolint:gochecknoglobals
var ConfigDefaults = Config{
CollectorsEnabled: []string{
- adAccessProcesses,
- transportQueues,
- httpProxy,
- activeSync,
- availabilityService,
- outlookWebAccess,
- autoDiscover,
- workloadManagement,
- rpcClientAccess,
- mapiHttpEmsmdb,
+ subCollectorADAccessProcesses,
+ subCollectorTransportQueues,
+ subCollectorHttpProxy,
+ subCollectorActiveSync,
+ subCollectorAvailabilityService,
+ subCollectorOutlookWebAccess,
+ subCollectorAutoDiscover,
+ subCollectorWorkloadManagement,
+ subCollectorRpcClientAccess,
+ subCollectorMapiHttpEmsmdb,
},
}
type Collector struct {
config Config
- perfDataCollectorADAccessProcesses *perfdata.Collector
- perfDataCollectorTransportQueues *perfdata.Collector
- perfDataCollectorHttpProxy *perfdata.Collector
- perfDataCollectorActiveSync *perfdata.Collector
- perfDataCollectorAvailabilityService *perfdata.Collector
- perfDataCollectorOWA *perfdata.Collector
- perfDataCollectorAutoDiscover *perfdata.Collector
- perfDataCollectorWorkloadManagementWorkloads *perfdata.Collector
- perfDataCollectorRpcClientAccess *perfdata.Collector
- perfDataCollectorMapiHttpEmsmdb *perfdata.Collector
-
- activeMailboxDeliveryQueueLength *prometheus.Desc
- activeSyncRequestsPerSec *prometheus.Desc
- activeTasks *prometheus.Desc
- activeUserCount *prometheus.Desc
- activeUserCountMapiHttpEmsMDB *prometheus.Desc
- autoDiscoverRequestsPerSec *prometheus.Desc
- availabilityRequestsSec *prometheus.Desc
- averageAuthenticationLatency *prometheus.Desc
- averageCASProcessingLatency *prometheus.Desc
- completedTasks *prometheus.Desc
- connectionCount *prometheus.Desc
- currentUniqueUsers *prometheus.Desc
- externalActiveRemoteDeliveryQueueLength *prometheus.Desc
- externalLargestDeliveryQueueLength *prometheus.Desc
- internalActiveRemoteDeliveryQueueLength *prometheus.Desc
- internalLargestDeliveryQueueLength *prometheus.Desc
- isActive *prometheus.Desc
- ldapReadTime *prometheus.Desc
- ldapSearchTime *prometheus.Desc
- ldapTimeoutErrorsPerSec *prometheus.Desc
- ldapWriteTime *prometheus.Desc
- longRunningLDAPOperationsPerMin *prometheus.Desc
- mailboxServerLocatorAverageLatency *prometheus.Desc
- mailboxServerProxyFailureRate *prometheus.Desc
- outstandingProxyRequests *prometheus.Desc
- owaRequestsPerSec *prometheus.Desc
- pingCommandsPending *prometheus.Desc
- poisonQueueLength *prometheus.Desc
- proxyRequestsPerSec *prometheus.Desc
- queuedTasks *prometheus.Desc
- retryMailboxDeliveryQueueLength *prometheus.Desc
- rpcAveragedLatency *prometheus.Desc
- rpcOperationsPerSec *prometheus.Desc
- rpcRequests *prometheus.Desc
- syncCommandsPerSec *prometheus.Desc
- unreachableQueueLength *prometheus.Desc
- userCount *prometheus.Desc
- yieldedTasks *prometheus.Desc
- messagesQueuedForDeliveryTotal *prometheus.Desc
- messagesSubmittedTotal *prometheus.Desc
- messagesDelayedTotal *prometheus.Desc
- messagesCompletedDeliveryTotal *prometheus.Desc
- shadowQueueLength *prometheus.Desc
- submissionQueueLength *prometheus.Desc
- delayQueueLength *prometheus.Desc
- itemsCompletedDeliveryTotal *prometheus.Desc
- itemsQueuedForDeliveryExpiredTotal *prometheus.Desc
- itemsQueuedForDeliveryTotal *prometheus.Desc
- itemsResubmittedTotal *prometheus.Desc
+ collectorFns []func(ch chan<- prometheus.Metric) error
+ closeFns []func()
+
+ collectorADAccessProcesses
+ collectorActiveSync
+ collectorAutoDiscover
+ collectorAvailabilityService
+ collectorHTTPProxy
+ collectorMapiHttpEmsmdb
+ collectorOWA
+ collectorRpcClientAccess
+ collectorTransportQueues
+ collectorWorkloadManagementWorkloads
}
func New(config *Config) *Collector {
@@ -167,16 +120,16 @@ func NewWithFlags(app *kingpin.Application) *Collector {
app.PreAction(func(*kingpin.ParseContext) error {
if listAllCollectors {
collectorDesc := map[string]string{
- adAccessProcesses: "[19108] MSExchange ADAccess Processes",
- transportQueues: "[20524] MSExchangeTransport Queues",
- httpProxy: "[36934] MSExchange HttpProxy",
- activeSync: "[25138] MSExchange ActiveSync",
- availabilityService: "[24914] MSExchange Availability Service",
- outlookWebAccess: "[24618] MSExchange OWA",
- autoDiscover: "[29240] MSExchange Autodiscover",
- workloadManagement: "[19430] MSExchange WorkloadManagement Workloads",
- rpcClientAccess: "[29336] MSExchange RpcClientAccess",
- mapiHttpEmsmdb: "[26463] MSExchange MapiHttp Emsmdb",
+ subCollectorADAccessProcesses: "[19108] MSExchange ADAccess Processes",
+ subCollectorTransportQueues: "[20524] MSExchangeTransport Queues",
+ subCollectorHttpProxy: "[36934] MSExchange HttpProxy",
+ subCollectorActiveSync: "[25138] MSExchange ActiveSync",
+ subCollectorAvailabilityService: "[24914] MSExchange Availability Service",
+ subCollectorOutlookWebAccess: "[24618] MSExchange OWA",
+ subCollectorAutoDiscover: "[29240] MSExchange Autodiscover",
+ subCollectorWorkloadManagement: "[19430] MSExchange WorkloadManagement Workloads",
+ subCollectorRpcClientAccess: "[29336] MSExchange RpcClientAccess",
+ subCollectorMapiHttpEmsmdb: "[26463] MSExchange MapiHttp Emsmdb",
}
sb := strings.Builder{}
@@ -208,51 +161,116 @@ func (c *Collector) GetName() string {
}
func (c *Collector) Close() error {
+ for _, fn := range c.closeFns {
+ fn()
+ }
+
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
- collectorFuncs := map[string]func() error{
- adAccessProcesses: c.buildADAccessProcesses,
- transportQueues: c.buildTransportQueues,
- httpProxy: c.buildHTTPProxy,
- activeSync: c.buildActiveSync,
- availabilityService: c.buildAvailabilityService,
- outlookWebAccess: c.buildOWA,
- autoDiscover: c.buildAutoDiscover,
- workloadManagement: c.buildWorkloadManagementWorkloads,
- rpcClientAccess: c.buildRPC,
- mapiHttpEmsmdb: c.buildMapiHttpEmsmdb,
+ subCollectors := map[string]struct {
+ build func() error
+ collect func(ch chan<- prometheus.Metric) error
+ close func()
+ }{
+ subCollectorADAccessProcesses: {
+ build: c.buildADAccessProcesses,
+ collect: c.collectADAccessProcesses,
+ close: c.perfDataCollectorADAccessProcesses.Close,
+ },
+ subCollectorTransportQueues: {
+ build: c.buildTransportQueues,
+ collect: c.collectTransportQueues,
+ close: c.perfDataCollectorTransportQueues.Close,
+ },
+ subCollectorHttpProxy: {
+ build: c.buildHTTPProxy,
+ collect: c.collectHTTPProxy,
+ close: c.perfDataCollectorHTTPProxy.Close,
+ },
+ subCollectorActiveSync: {
+ build: c.buildActiveSync,
+ collect: c.collectActiveSync,
+ close: c.perfDataCollectorActiveSync.Close,
+ },
+ subCollectorAvailabilityService: {
+ build: c.buildAvailabilityService,
+ collect: c.collectAvailabilityService,
+ close: c.perfDataCollectorAvailabilityService.Close,
+ },
+ subCollectorOutlookWebAccess: {
+ build: c.buildOWA,
+ collect: c.collectOWA,
+ close: c.perfDataCollectorOWA.Close,
+ },
+ subCollectorAutoDiscover: {
+ build: c.buildAutoDiscover,
+ collect: c.collectAutoDiscover,
+ close: c.perfDataCollectorAutoDiscover.Close,
+ },
+ subCollectorWorkloadManagement: {
+ build: c.buildWorkloadManagementWorkloads,
+ collect: c.collectWorkloadManagementWorkloads,
+ close: c.perfDataCollectorWorkloadManagementWorkloads.Close,
+ },
+ subCollectorRpcClientAccess: {
+ build: c.buildRpcClientAccess,
+ collect: c.collectRpcClientAccess,
+ close: c.perfDataCollectorRpcClientAccess.Close,
+ },
+ subCollectorMapiHttpEmsmdb: {
+ build: c.buildMapiHttpEmsmdb,
+ collect: c.collectMapiHttpEmsmdb,
+ close: c.perfDataCollectorMapiHttpEmsmdb.Close,
+ },
}
- for _, collectorName := range c.config.CollectorsEnabled {
- if err := collectorFuncs[collectorName](); err != nil {
- return err
+ errs := make([]error, 0, len(c.config.CollectorsEnabled))
+
+ for _, name := range c.config.CollectorsEnabled {
+ if _, ok := subCollectors[name]; !ok {
+ return fmt.Errorf("unknown collector: %s", name)
}
+
+ if err := subCollectors[name].build(); err != nil {
+ errs = append(errs, fmt.Errorf("failed to build %s collector: %w", name, err))
+
+ continue
+ }
+
+ c.collectorFns = append(c.collectorFns, subCollectors[name].collect)
+ c.closeFns = append(c.closeFns, subCollectors[name].close)
}
- return nil
+ return errors.Join(errs...)
}
// Collect collects exchange metrics and sends them to prometheus.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- collectorFuncs := map[string]func(ch chan<- prometheus.Metric) error{
- adAccessProcesses: c.collectADAccessProcesses,
- transportQueues: c.collectTransportQueues,
- httpProxy: c.collectHTTPProxy,
- activeSync: c.collectActiveSync,
- availabilityService: c.collectAvailabilityService,
- outlookWebAccess: c.collectOWA,
- autoDiscover: c.collectAutoDiscover,
- workloadManagement: c.collectWorkloadManagementWorkloads,
- rpcClientAccess: c.collectRPC,
- mapiHttpEmsmdb: c.collectMapiHttpEmsmdb,
+ errCh := make(chan error, len(c.collectorFns))
+ errs := make([]error, 0, len(c.collectorFns))
+
+ wg := sync.WaitGroup{}
+
+ for _, fn := range c.collectorFns {
+ wg.Add(1)
+
+ go func(fn func(ch chan<- prometheus.Metric) error) {
+ defer wg.Done()
+
+ if err := fn(ch); err != nil {
+ errCh <- err
+ }
+ }(fn)
}
- errs := make([]error, len(c.config.CollectorsEnabled))
+ wg.Wait()
+
+ close(errCh)
- for i, collectorName := range c.config.CollectorsEnabled {
- errs[i] = collectorFuncs[collectorName](ch)
+ for err := range errCh {
+ errs = append(errs, err)
}
return errors.Join(errs...)
@@ -265,8 +283,3 @@ func (c *Collector) toLabelName(name string) string {
return s
}
-
-// msToSec converts from ms to seconds.
-func (c *Collector) msToSec(t float64) float64 {
- return t / 1000
-}
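
Editor's note: the rewritten Collect above fans each enabled sub-collector out into its own goroutine and funnels failures through a buffered error channel before joining them. Below is a minimal, self-contained sketch of that pattern — the collector functions are placeholders, not the exporter's real ones — showing why the channel is sized to len(fns) (so no goroutine blocks on send) and why errors.Join returns nil when nothing failed.

```go
// Minimal sketch of the concurrent fan-out used by Collect; placeholder functions only.
package main

import (
	"errors"
	"fmt"
	"sync"
)

func collectAll(fns []func() error) error {
	errCh := make(chan error, len(fns)) // buffered so goroutines never block on send

	var wg sync.WaitGroup

	for _, fn := range fns {
		wg.Add(1)

		go func(fn func() error) {
			defer wg.Done()

			if err := fn(); err != nil {
				errCh <- err
			}
		}(fn)
	}

	wg.Wait()
	close(errCh)

	errs := make([]error, 0, len(fns))
	for err := range errCh {
		errs = append(errs, err)
	}

	return errors.Join(errs...) // nil when errs is empty
}

func main() {
	err := collectAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("placeholder sub-collector failed") },
	})
	fmt.Println(err)
}
```
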
diff --git a/internal/collector/exchange/exchange_active_sync.go b/internal/collector/exchange/exchange_active_sync.go
index 06c2e87ee..618e830b1 100644
--- a/internal/collector/exchange/exchange_active_sync.go
+++ b/internal/collector/exchange/exchange_active_sync.go
@@ -18,27 +18,30 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- requestsPerSec = "Requests/sec"
- pingCommandsPending = "Ping Commands Pending"
- syncCommandsPerSec = "Sync Commands/sec"
-)
+type collectorActiveSync struct {
+ perfDataCollectorActiveSync *pdh.Collector
+ perfDataObjectActiveSync []perfDataCounterValuesActiveSync
-func (c *Collector) buildActiveSync() error {
- counters := []string{
- requestsPerSec,
- pingCommandsPending,
- syncCommandsPerSec,
- }
+ activeSyncRequestsPerSec *prometheus.Desc
+ pingCommandsPending *prometheus.Desc
+ syncCommandsPerSec *prometheus.Desc
+}
+type perfDataCounterValuesActiveSync struct {
+ RequestsPerSec float64 `perfdata:"Requests/sec"`
+ PingCommandsPending float64 `perfdata:"Ping Commands Pending"`
+ SyncCommandsPerSec float64 `perfdata:"Sync Commands/sec"`
+}
+
+func (c *Collector) buildActiveSync() error {
var err error
- c.perfDataCollectorActiveSync, err = perfdata.NewCollector("MSExchange ActiveSync", perfdata.InstancesAll, counters)
+ c.perfDataCollectorActiveSync, err = pdh.NewCollector[perfDataCounterValuesActiveSync]("MSExchange ActiveSync", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err)
}
@@ -66,30 +69,26 @@ func (c *Collector) buildActiveSync() error {
}
func (c *Collector) collectActiveSync(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorActiveSync.Collect()
+ err := c.perfDataCollectorActiveSync.Collect(&c.perfDataObjectActiveSync)
if err != nil {
return fmt.Errorf("failed to collect MSExchange ActiveSync metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange ActiveSync metrics: %w", types.ErrNoData)
- }
-
- for _, data := range perfData {
+ for _, data := range c.perfDataObjectActiveSync {
ch <- prometheus.MustNewConstMetric(
c.activeSyncRequestsPerSec,
prometheus.CounterValue,
- data[requestsPerSec].FirstValue,
+ data.RequestsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.pingCommandsPending,
prometheus.GaugeValue,
- data[pingCommandsPending].FirstValue,
+ data.PingCommandsPending,
)
ch <- prometheus.MustNewConstMetric(
c.syncCommandsPerSec,
prometheus.CounterValue,
- data[syncCommandsPerSec].FirstValue,
+ data.SyncCommandsPerSec,
)
}
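
Editor's note: from this file onward, per-counter string constants are replaced by struct fields carrying `perfdata:"..."` tags, and pdh.NewCollector[T] derives the counter list from the type parameter. The internals of internal/pdh are not shown in this diff, so the snippet below is only a hedged illustration of how a tag-driven mapping from counter names to struct fields can work via reflection; fillFromCounters and activeSyncValues are hypothetical names invented for this sketch, not the exporter's API.

```go
// Hedged illustration (not the real internal/pdh code) of tag-driven counter mapping.
package main

import (
	"fmt"
	"reflect"
)

// fillFromCounters copies values from a counter-name -> value map into any struct
// whose exported float64 fields carry a `perfdata` tag.
func fillFromCounters(dst any, counters map[string]float64) {
	v := reflect.ValueOf(dst).Elem()
	t := v.Type()

	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)

		tag := field.Tag.Get("perfdata")
		if tag == "" || field.Type.Kind() != reflect.Float64 || !v.Field(i).CanSet() {
			continue
		}

		if val, ok := counters[tag]; ok {
			v.Field(i).SetFloat(val)
		}
	}
}

// activeSyncValues mirrors the shape of the tagged structs in this diff (hypothetical copy).
type activeSyncValues struct {
	RequestsPerSec      float64 `perfdata:"Requests/sec"`
	PingCommandsPending float64 `perfdata:"Ping Commands Pending"`
	SyncCommandsPerSec  float64 `perfdata:"Sync Commands/sec"`
}

func main() {
	var out activeSyncValues

	fillFromCounters(&out, map[string]float64{
		"Requests/sec":          12,
		"Ping Commands Pending": 3,
	})

	fmt.Printf("%+v\n", out) // untouched fields stay at their zero value
}
```
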
diff --git a/internal/collector/exchange/exchange_ad_access_processes.go b/internal/collector/exchange/exchange_ad_access_processes.go
index dbf41031e..cfda3791f 100644
--- a/internal/collector/exchange/exchange_ad_access_processes.go
+++ b/internal/collector/exchange/exchange_ad_access_processes.go
@@ -18,31 +18,37 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
+ "github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- ldapReadTime = "LDAP Read Time"
- ldapSearchTime = "LDAP Search Time"
- ldapWriteTime = "LDAP Write Time"
- ldapTimeoutErrorsPerSec = "LDAP Timeout Errors/sec"
- longRunningLDAPOperationsPerMin = "Long Running LDAP Operations/min"
-)
+type collectorADAccessProcesses struct {
+ perfDataCollectorADAccessProcesses *pdh.Collector
+ perfDataObjectADAccessProcesses []perfDataCounterValuesADAccessProcesses
-func (c *Collector) buildADAccessProcesses() error {
- counters := []string{
- ldapReadTime,
- ldapSearchTime,
- ldapWriteTime,
- ldapTimeoutErrorsPerSec,
- longRunningLDAPOperationsPerMin,
- }
+ ldapReadTime *prometheus.Desc
+ ldapSearchTime *prometheus.Desc
+ ldapTimeoutErrorsPerSec *prometheus.Desc
+ ldapWriteTime *prometheus.Desc
+ longRunningLDAPOperationsPerMin *prometheus.Desc
+}
+
+type perfDataCounterValuesADAccessProcesses struct {
+ Name string
+ LdapReadTime float64 `perfdata:"LDAP Read Time"`
+ LdapSearchTime float64 `perfdata:"LDAP Search Time"`
+ LdapWriteTime float64 `perfdata:"LDAP Write Time"`
+ LdapTimeoutErrorsPerSec float64 `perfdata:"LDAP Timeout Errors/sec"`
+ LongRunningLDAPOperationsPerMin float64 `perfdata:"Long Running LDAP Operations/min"`
+}
+
+func (c *Collector) buildADAccessProcesses() error {
var err error
- c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector("MSExchange ADAccess Processes", perfdata.InstancesAll, counters)
+ c.perfDataCollectorADAccessProcesses, err = pdh.NewCollector[perfDataCounterValuesADAccessProcesses]("MSExchange ADAccess Processes", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err)
}
@@ -82,19 +88,15 @@ func (c *Collector) buildADAccessProcesses() error {
}
func (c *Collector) collectADAccessProcesses(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorADAccessProcesses.Collect()
+ err := c.perfDataCollectorADAccessProcesses.Collect(&c.perfDataObjectADAccessProcesses)
if err != nil {
return fmt.Errorf("failed to collect MSExchange ADAccess Processes metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange ADAccess Processes metrics: %w", types.ErrNoData)
- }
-
labelUseCount := make(map[string]int)
- for name, data := range perfData {
- labelName := c.toLabelName(name)
+ for _, data := range c.perfDataObjectADAccessProcesses {
+ labelName := c.toLabelName(data.Name)
// Since we're not including the PID suffix from the instance names in the label names, we get an occasional duplicate.
// This seems to affect about 4 instances only of this object.
@@ -106,31 +108,31 @@ func (c *Collector) collectADAccessProcesses(ch chan<- prometheus.Metric) error
ch <- prometheus.MustNewConstMetric(
c.ldapReadTime,
prometheus.CounterValue,
- c.msToSec(data[ldapReadTime].FirstValue),
+ utils.MilliSecToSec(data.LdapReadTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapSearchTime,
prometheus.CounterValue,
- c.msToSec(data[ldapSearchTime].FirstValue),
+ utils.MilliSecToSec(data.LdapSearchTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapWriteTime,
prometheus.CounterValue,
- c.msToSec(data[ldapWriteTime].FirstValue),
+ utils.MilliSecToSec(data.LdapWriteTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapTimeoutErrorsPerSec,
prometheus.CounterValue,
- data[ldapTimeoutErrorsPerSec].FirstValue,
+ data.LdapTimeoutErrorsPerSec,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.longRunningLDAPOperationsPerMin,
prometheus.CounterValue,
- data[longRunningLDAPOperationsPerMin].FirstValue*60,
+ data.LongRunningLDAPOperationsPerMin*60,
labelName,
)
}
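
Editor's note: collectADAccessProcesses above keeps a labelUseCount map because stripping the PID suffix from instance names can make two instances collapse onto the same label; the suffixing logic itself sits in unchanged lines not shown in this hunk. The snippet below is a standalone sketch of that de-duplication idea under that assumption — uniqueLabels and its suffix format are invented for illustration, not the exporter's exact behaviour.

```go
// Standalone sketch of de-duplicating labels after dropping a PID-style suffix (assumed logic).
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// uniqueLabels strips a trailing "_<pid>"-style suffix and appends "_n" whenever
// the resulting label has already been seen, so every returned label is unique.
func uniqueLabels(instances []string) []string {
	useCount := make(map[string]int, len(instances))
	out := make([]string, 0, len(instances))

	for _, instance := range instances {
		label := instance
		if i := strings.LastIndex(label, "_"); i > 0 {
			label = label[:i] // e.g. "edgetransport_1234" -> "edgetransport"
		}

		useCount[label]++
		if n := useCount[label]; n > 1 {
			label += "_" + strconv.Itoa(n)
		}

		out = append(out, label)
	}

	return out
}

func main() {
	fmt.Println(uniqueLabels([]string{"edgetransport_1234", "edgetransport_5678", "msexchangerepl_42"}))
	// Output: [edgetransport edgetransport_2 msexchangerepl]
}
```
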
diff --git a/internal/collector/exchange/exchange_autodiscover.go b/internal/collector/exchange/exchange_autodiscover.go
index 5e5f48131..a0a29510b 100644
--- a/internal/collector/exchange/exchange_autodiscover.go
+++ b/internal/collector/exchange/exchange_autodiscover.go
@@ -18,19 +18,26 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
-func (c *Collector) buildAutoDiscover() error {
- counters := []string{
- requestsPerSec,
- }
+type collectorAutoDiscover struct {
+ perfDataCollectorAutoDiscover *pdh.Collector
+ perfDataObjectAutoDiscover []perfDataCounterValuesAutoDiscover
+ autoDiscoverRequestsPerSec *prometheus.Desc
+}
+
+type perfDataCounterValuesAutoDiscover struct {
+ RequestsPerSec float64 `perfdata:"Requests/sec"`
+}
+
+func (c *Collector) buildAutoDiscover() error {
var err error
- c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector("MSExchange Autodiscover", perfdata.InstancesAll, counters)
+ c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover]("MSExchange Autodiscover", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
}
@@ -46,20 +53,16 @@ func (c *Collector) buildAutoDiscover() error {
}
func (c *Collector) collectAutoDiscover(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorAutoDiscover.Collect()
+ err := c.perfDataCollectorAutoDiscover.Collect(&c.perfDataObjectAutoDiscover)
if err != nil {
return fmt.Errorf("failed to collect MSExchange Autodiscover metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange Autodiscover metrics: %w", types.ErrNoData)
- }
-
- for _, data := range perfData {
+ for _, data := range c.perfDataObjectAutoDiscover {
ch <- prometheus.MustNewConstMetric(
c.autoDiscoverRequestsPerSec,
prometheus.CounterValue,
- data[requestsPerSec].FirstValue,
+ data.RequestsPerSec,
)
}
diff --git a/internal/collector/exchange/exchange_availability_service.go b/internal/collector/exchange/exchange_availability_service.go
index 7e57ee892..31efa42db 100644
--- a/internal/collector/exchange/exchange_availability_service.go
+++ b/internal/collector/exchange/exchange_availability_service.go
@@ -18,19 +18,26 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
-func (c *Collector) buildAvailabilityService() error {
- counters := []string{
- requestsPerSec,
- }
+type collectorAvailabilityService struct {
+ perfDataCollectorAvailabilityService *pdh.Collector
+ perfDataObjectAvailabilityService []perfDataCounterValuesAvailabilityService
+ availabilityRequestsSec *prometheus.Desc
+}
+
+type perfDataCounterValuesAvailabilityService struct {
+ RequestsPerSec float64 `perfdata:"Requests/sec"`
+}
+
+func (c *Collector) buildAvailabilityService() error {
var err error
- c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector("MSExchange Availability Service", perfdata.InstancesAll, counters)
+ c.perfDataCollectorAvailabilityService, err = pdh.NewCollector[perfDataCounterValuesAvailabilityService]("MSExchange Availability Service", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err)
}
@@ -46,20 +53,16 @@ func (c *Collector) buildAvailabilityService() error {
}
func (c *Collector) collectAvailabilityService(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorAvailabilityService.Collect()
+ err := c.perfDataCollectorAvailabilityService.Collect(&c.perfDataObjectAvailabilityService)
if err != nil {
return fmt.Errorf("failed to collect MSExchange Availability Service metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange Availability Service metrics: %w", types.ErrNoData)
- }
-
- for _, data := range perfData {
+ for _, data := range c.perfDataObjectAvailabilityService {
ch <- prometheus.MustNewConstMetric(
c.availabilityRequestsSec,
prometheus.CounterValue,
- data[requestsPerSec].FirstValue,
+ data.RequestsPerSec,
)
}
diff --git a/internal/collector/exchange/exchange_http_proxy.go b/internal/collector/exchange/exchange_http_proxy.go
index 4e4691bce..c42f5cda4 100644
--- a/internal/collector/exchange/exchange_http_proxy.go
+++ b/internal/collector/exchange/exchange_http_proxy.go
@@ -18,33 +18,39 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
+ "github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- mailboxServerLocatorAverageLatency = "MailboxServerLocator Average Latency (Moving Average)"
- averageAuthenticationLatency = "Average Authentication Latency"
- averageCASProcessingLatency = "Average ClientAccess Server Processing Latency"
- mailboxServerProxyFailureRate = "Mailbox Server Proxy Failure Rate"
- outstandingProxyRequests = "Outstanding Proxy Requests"
- proxyRequestsPerSec = "Proxy Requests/Sec"
-)
+type collectorHTTPProxy struct {
+ perfDataCollectorHTTPProxy *pdh.Collector
+ perfDataObjectHTTPProxy []perfDataCounterValuesHTTPProxy
-func (c *Collector) buildHTTPProxy() error {
- counters := []string{
- mailboxServerLocatorAverageLatency,
- averageAuthenticationLatency,
- averageCASProcessingLatency,
- mailboxServerProxyFailureRate,
- outstandingProxyRequests,
- proxyRequestsPerSec,
- }
+ mailboxServerLocatorAverageLatency *prometheus.Desc
+ averageAuthenticationLatency *prometheus.Desc
+ outstandingProxyRequests *prometheus.Desc
+ proxyRequestsPerSec *prometheus.Desc
+ averageCASProcessingLatency *prometheus.Desc
+ mailboxServerProxyFailureRate *prometheus.Desc
+}
+
+type perfDataCounterValuesHTTPProxy struct {
+ Name string
+ MailboxServerLocatorAverageLatency float64 `perfdata:"MailboxServerLocator Average Latency (Moving Average)"`
+ AverageAuthenticationLatency float64 `perfdata:"Average Authentication Latency"`
+ AverageCASProcessingLatency float64 `perfdata:"Average ClientAccess Server Processing Latency"`
+ MailboxServerProxyFailureRate float64 `perfdata:"Mailbox Server Proxy Failure Rate"`
+ OutstandingProxyRequests float64 `perfdata:"Outstanding Proxy Requests"`
+ ProxyRequestsPerSec float64 `perfdata:"Proxy Requests/Sec"`
+}
+
+func (c *Collector) buildHTTPProxy() error {
var err error
- c.perfDataCollectorHttpProxy, err = perfdata.NewCollector("MSExchange HttpProxy", perfdata.InstancesAll, counters)
+ c.perfDataCollectorHTTPProxy, err = pdh.NewCollector[perfDataCounterValuesHTTPProxy]("MSExchange HttpProxy", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err)
}
@@ -90,51 +96,47 @@ func (c *Collector) buildHTTPProxy() error {
}
func (c *Collector) collectHTTPProxy(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorHttpProxy.Collect()
+ err := c.perfDataCollectorHTTPProxy.Collect(&c.perfDataObjectHTTPProxy)
if err != nil {
return fmt.Errorf("failed to collect MSExchange HttpProxy Service metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange HttpProxy Service metrics: %w", types.ErrNoData)
- }
-
- for name, data := range perfData {
- labelName := c.toLabelName(name)
+ for _, data := range c.perfDataObjectHTTPProxy {
+ labelName := c.toLabelName(data.Name)
ch <- prometheus.MustNewConstMetric(
c.mailboxServerLocatorAverageLatency,
prometheus.GaugeValue,
- c.msToSec(data[mailboxServerLocatorAverageLatency].FirstValue),
+ utils.MilliSecToSec(data.MailboxServerLocatorAverageLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.averageAuthenticationLatency,
prometheus.GaugeValue,
- data[averageAuthenticationLatency].FirstValue,
+ data.AverageAuthenticationLatency,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.averageCASProcessingLatency,
prometheus.GaugeValue,
- c.msToSec(data[averageCASProcessingLatency].FirstValue),
+ utils.MilliSecToSec(data.AverageCASProcessingLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.mailboxServerProxyFailureRate,
prometheus.GaugeValue,
- data[mailboxServerProxyFailureRate].FirstValue,
+ data.MailboxServerProxyFailureRate,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.outstandingProxyRequests,
prometheus.GaugeValue,
- data[outstandingProxyRequests].FirstValue,
+ data.OutstandingProxyRequests,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.proxyRequestsPerSec,
prometheus.CounterValue,
- data[proxyRequestsPerSec].FirstValue,
+ data.ProxyRequestsPerSec,
labelName,
)
}
diff --git a/internal/collector/exchange/exchange_mapi_http_emsmdb.go b/internal/collector/exchange/exchange_mapi_http_emsmdb.go
index a36c1e372..34bc6cda3 100644
--- a/internal/collector/exchange/exchange_mapi_http_emsmdb.go
+++ b/internal/collector/exchange/exchange_mapi_http_emsmdb.go
@@ -18,23 +18,26 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- activeUserCount = "Active User Count"
-)
+type collectorMapiHttpEmsmdb struct {
+ perfDataCollectorMapiHttpEmsmdb *pdh.Collector
+ perfDataObjectMapiHttpEmsmdb []perfDataCounterValuesMapiHttpEmsmdb
-func (c *Collector) buildMapiHttpEmsmdb() error {
- counters := []string{
- activeUserCount,
- }
+ activeUserCountMapiHttpEmsMDB *prometheus.Desc
+}
+type perfDataCounterValuesMapiHttpEmsmdb struct {
+ ActiveUserCount float64 `perfdata:"Active User Count"`
+}
+
+func (c *Collector) buildMapiHttpEmsmdb() error {
var err error
- c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector("MSExchange MapiHttp Emsmdb", perfdata.InstancesAll, counters)
+ c.perfDataCollectorMapiHttpEmsmdb, err = pdh.NewCollector[perfDataCounterValuesMapiHttpEmsmdb]("MSExchange MapiHttp Emsmdb", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err)
}
@@ -50,20 +53,16 @@ func (c *Collector) buildMapiHttpEmsmdb() error {
}
func (c *Collector) collectMapiHttpEmsmdb(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorMapiHttpEmsmdb.Collect()
+ err := c.perfDataCollectorMapiHttpEmsmdb.Collect(&c.perfDataObjectMapiHttpEmsmdb)
if err != nil {
return fmt.Errorf("failed to collect MSExchange MapiHttp Emsmdb metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange MapiHttp Emsmdb metrics: %w", types.ErrNoData)
- }
-
- for _, data := range perfData {
+ for _, data := range c.perfDataObjectMapiHttpEmsmdb {
ch <- prometheus.MustNewConstMetric(
c.activeUserCountMapiHttpEmsMDB,
prometheus.GaugeValue,
- data[activeUserCount].FirstValue,
+ data.ActiveUserCount,
)
}
diff --git a/internal/collector/exchange/exchange_outlook_web_access.go b/internal/collector/exchange/exchange_outlook_web_access.go
index e3f1a4995..c4c4e60cf 100644
--- a/internal/collector/exchange/exchange_outlook_web_access.go
+++ b/internal/collector/exchange/exchange_outlook_web_access.go
@@ -18,25 +18,28 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- currentUniqueUsers = "Current Unique Users"
- // requestsPerSec = "Requests/sec"
-)
+type collectorOWA struct {
+ perfDataCollectorOWA *pdh.Collector
+ perfDataObjectOWA []perfDataCounterValuesOWA
-func (c *Collector) buildOWA() error {
- counters := []string{
- currentUniqueUsers,
- requestsPerSec,
- }
+ currentUniqueUsers *prometheus.Desc
+ owaRequestsPerSec *prometheus.Desc
+}
+type perfDataCounterValuesOWA struct {
+ CurrentUniqueUsers float64 `perfdata:"Current Unique Users"`
+ RequestsPerSec float64 `perfdata:"Requests/sec"`
+}
+
+func (c *Collector) buildOWA() error {
var err error
- c.perfDataCollectorOWA, err = perfdata.NewCollector("MSExchange OWA", perfdata.InstancesAll, counters)
+ c.perfDataCollectorOWA, err = pdh.NewCollector[perfDataCounterValuesOWA]("MSExchange OWA", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange OWA collector: %w", err)
}
@@ -58,25 +61,21 @@ func (c *Collector) buildOWA() error {
}
func (c *Collector) collectOWA(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorOWA.Collect()
+ err := c.perfDataCollectorOWA.Collect(&c.perfDataObjectOWA)
if err != nil {
return fmt.Errorf("failed to collect MSExchange OWA metrics: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange OWA metrics: %w", types.ErrNoData)
- }
-
- for _, data := range perfData {
+ for _, data := range c.perfDataObjectOWA {
ch <- prometheus.MustNewConstMetric(
c.currentUniqueUsers,
prometheus.GaugeValue,
- data[currentUniqueUsers].FirstValue,
+ data.CurrentUniqueUsers,
)
ch <- prometheus.MustNewConstMetric(
c.owaRequestsPerSec,
prometheus.CounterValue,
- data[requestsPerSec].FirstValue,
+ data.RequestsPerSec,
)
}
diff --git a/internal/collector/exchange/exchange_rpc_client_access.go b/internal/collector/exchange/exchange_rpc_client_access.go
index f68ecaacc..b144d9339 100644
--- a/internal/collector/exchange/exchange_rpc_client_access.go
+++ b/internal/collector/exchange/exchange_rpc_client_access.go
@@ -18,33 +18,37 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
+ "github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- rpcAveragedLatency = "RPC Averaged Latency"
- rpcRequests = "RPC Requests"
- // activeUserCount = "Active User Count"
- connectionCount = "Connection Count"
- rpcOperationsPerSec = "RPC Operations/sec"
- userCount = "User Count"
-)
+type collectorRpcClientAccess struct {
+ perfDataCollectorRpcClientAccess *pdh.Collector
+ perfDataObjectRpcClientAccess []perfDataCounterValuesRpcClientAccess
-func (c *Collector) buildRPC() error {
- counters := []string{
- rpcAveragedLatency,
- rpcRequests,
- activeUserCount,
- connectionCount,
- rpcOperationsPerSec,
- userCount,
- }
+ activeUserCount *prometheus.Desc
+ connectionCount *prometheus.Desc
+ rpcAveragedLatency *prometheus.Desc
+ rpcOperationsPerSec *prometheus.Desc
+ rpcRequests *prometheus.Desc
+ userCount *prometheus.Desc
+}
+type perfDataCounterValuesRpcClientAccess struct {
+ RpcAveragedLatency float64 `perfdata:"RPC Averaged Latency"`
+ RpcRequests float64 `perfdata:"RPC Requests"`
+ ActiveUserCount float64 `perfdata:"Active User Count"`
+ ConnectionCount float64 `perfdata:"Connection Count"`
+ RpcOperationsPerSec float64 `perfdata:"RPC Operations/sec"`
+ UserCount float64 `perfdata:"User Count"`
+}
+
+func (c *Collector) buildRpcClientAccess() error {
var err error
- c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector("MSExchange RpcClientAccess", perfdata.InstancesAll, counters)
+ c.perfDataCollectorRpcClientAccess, err = pdh.NewCollector[perfDataCounterValuesRpcClientAccess]("MSExchange RpcClientAccess", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err)
}
@@ -89,46 +93,42 @@ func (c *Collector) buildRPC() error {
return nil
}
-func (c *Collector) collectRPC(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorRpcClientAccess.Collect()
+func (c *Collector) collectRpcClientAccess(ch chan<- prometheus.Metric) error {
+ err := c.perfDataCollectorRpcClientAccess.Collect(&c.perfDataObjectRpcClientAccess)
if err != nil {
return fmt.Errorf("failed to collect MSExchange RpcClientAccess: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange RpcClientAccess metrics: %w", types.ErrNoData)
- }
-
- for _, data := range perfData {
+ for _, data := range c.perfDataObjectRpcClientAccess {
ch <- prometheus.MustNewConstMetric(
c.rpcAveragedLatency,
prometheus.GaugeValue,
- c.msToSec(data[rpcAveragedLatency].FirstValue),
+ utils.MilliSecToSec(data.RpcAveragedLatency),
)
ch <- prometheus.MustNewConstMetric(
c.rpcRequests,
prometheus.GaugeValue,
- data[rpcRequests].FirstValue,
+ data.RpcRequests,
)
ch <- prometheus.MustNewConstMetric(
c.activeUserCount,
prometheus.GaugeValue,
- data[activeUserCount].FirstValue,
+ data.ActiveUserCount,
)
ch <- prometheus.MustNewConstMetric(
c.connectionCount,
prometheus.GaugeValue,
- data[connectionCount].FirstValue,
+ data.ConnectionCount,
)
ch <- prometheus.MustNewConstMetric(
c.rpcOperationsPerSec,
prometheus.CounterValue,
- data[rpcOperationsPerSec].FirstValue,
+ data.RpcOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.userCount,
prometheus.GaugeValue,
- data[userCount].FirstValue,
+ data.UserCount,
)
}
diff --git a/internal/collector/exchange/exchange_transport_queues.go b/internal/collector/exchange/exchange_transport_queues.go
index 34b6db683..1eea43cba 100644
--- a/internal/collector/exchange/exchange_transport_queues.go
+++ b/internal/collector/exchange/exchange_transport_queues.go
@@ -18,59 +18,64 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- externalActiveRemoteDeliveryQueueLength = "External Active Remote Delivery Queue Length"
- internalActiveRemoteDeliveryQueueLength = "Internal Active Remote Delivery Queue Length"
- activeMailboxDeliveryQueueLength = "Active Mailbox Delivery Queue Length"
- retryMailboxDeliveryQueueLength = "Retry Mailbox Delivery Queue Length"
- unreachableQueueLength = "Unreachable Queue Length"
- externalLargestDeliveryQueueLength = "External Largest Delivery Queue Length"
- internalLargestDeliveryQueueLength = "Internal Largest Delivery Queue Length"
- poisonQueueLength = "Poison Queue Length"
- messagesQueuedForDeliveryTotal = "Messages Queued For Delivery Total"
- messagesSubmittedTotal = "Messages Submitted Total"
- messagesDelayedTotal = "Messages Delayed Total"
- messagesCompletedDeliveryTotal = "Messages Completed Delivery Total"
- shadowQueueLength = "Shadow Queue Length"
- submissionQueueLength = "Submission Queue Length"
- delayQueueLength = "Delay Queue Length"
- itemsCompletedDeliveryTotal = "Items Completed Delivery Total"
- itemsQueuedForDeliveryExpiredTotal = "Items Queued For Delivery Expired Total"
- itemsQueuedForDeliveryTotal = "Items Queued For Delivery Total"
- itemsResubmittedTotal = "Items Resubmitted Total"
-)
+type collectorTransportQueues struct {
+ perfDataCollectorTransportQueues *pdh.Collector
+ perfDataObjectTransportQueues []perfDataCounterValuesTransportQueues
-func (c *Collector) buildTransportQueues() error {
- counters := []string{
- externalActiveRemoteDeliveryQueueLength,
- internalActiveRemoteDeliveryQueueLength,
- activeMailboxDeliveryQueueLength,
- retryMailboxDeliveryQueueLength,
- unreachableQueueLength,
- externalLargestDeliveryQueueLength,
- internalLargestDeliveryQueueLength,
- poisonQueueLength,
- messagesQueuedForDeliveryTotal,
- messagesSubmittedTotal,
- messagesDelayedTotal,
- messagesCompletedDeliveryTotal,
- shadowQueueLength,
- submissionQueueLength,
- delayQueueLength,
- itemsCompletedDeliveryTotal,
- itemsQueuedForDeliveryExpiredTotal,
- itemsQueuedForDeliveryTotal,
- itemsResubmittedTotal,
- }
+ activeMailboxDeliveryQueueLength *prometheus.Desc
+ externalActiveRemoteDeliveryQueueLength *prometheus.Desc
+ externalLargestDeliveryQueueLength *prometheus.Desc
+ internalActiveRemoteDeliveryQueueLength *prometheus.Desc
+ internalLargestDeliveryQueueLength *prometheus.Desc
+ poisonQueueLength *prometheus.Desc
+ retryMailboxDeliveryQueueLength *prometheus.Desc
+ unreachableQueueLength *prometheus.Desc
+ messagesQueuedForDeliveryTotal *prometheus.Desc
+ messagesSubmittedTotal *prometheus.Desc
+ messagesDelayedTotal *prometheus.Desc
+ messagesCompletedDeliveryTotal *prometheus.Desc
+ shadowQueueLength *prometheus.Desc
+ submissionQueueLength *prometheus.Desc
+ delayQueueLength *prometheus.Desc
+ itemsCompletedDeliveryTotal *prometheus.Desc
+ itemsQueuedForDeliveryExpiredTotal *prometheus.Desc
+ itemsQueuedForDeliveryTotal *prometheus.Desc
+ itemsResubmittedTotal *prometheus.Desc
+}
+
+type perfDataCounterValuesTransportQueues struct {
+ Name string
+ ExternalActiveRemoteDeliveryQueueLength float64 `perfdata:"External Active Remote Delivery Queue Length"`
+ InternalActiveRemoteDeliveryQueueLength float64 `perfdata:"Internal Active Remote Delivery Queue Length"`
+ ActiveMailboxDeliveryQueueLength float64 `perfdata:"Active Mailbox Delivery Queue Length"`
+ RetryMailboxDeliveryQueueLength float64 `perfdata:"Retry Mailbox Delivery Queue Length"`
+ UnreachableQueueLength float64 `perfdata:"Unreachable Queue Length"`
+ ExternalLargestDeliveryQueueLength float64 `perfdata:"External Largest Delivery Queue Length"`
+ InternalLargestDeliveryQueueLength float64 `perfdata:"Internal Largest Delivery Queue Length"`
+ PoisonQueueLength float64 `perfdata:"Poison Queue Length"`
+ MessagesQueuedForDeliveryTotal float64 `perfdata:"Messages Queued For Delivery Total"`
+ MessagesSubmittedTotal float64 `perfdata:"Messages Submitted Total"`
+ MessagesDelayedTotal float64 `perfdata:"Messages Delayed Total"`
+ MessagesCompletedDeliveryTotal float64 `perfdata:"Messages Completed Delivery Total"`
+ ShadowQueueLength float64 `perfdata:"Shadow Queue Length"`
+ SubmissionQueueLength float64 `perfdata:"Submission Queue Length"`
+ DelayQueueLength float64 `perfdata:"Delay Queue Length"`
+ ItemsCompletedDeliveryTotal float64 `perfdata:"Items Completed Delivery Total"`
+ ItemsQueuedForDeliveryExpiredTotal float64 `perfdata:"Items Queued For Delivery Expired Total"`
+ ItemsQueuedForDeliveryTotal float64 `perfdata:"Items Queued For Delivery Total"`
+ ItemsResubmittedTotal float64 `perfdata:"Items Resubmitted Total"`
+}
+
+func (c *Collector) buildTransportQueues() error {
var err error
- c.perfDataCollectorTransportQueues, err = perfdata.NewCollector("MSExchangeTransport Queues", perfdata.InstancesAll, counters)
+ c.perfDataCollectorTransportQueues, err = pdh.NewCollector[perfDataCounterValuesTransportQueues]("MSExchangeTransport Queues", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err)
}
@@ -194,130 +199,126 @@ func (c *Collector) buildTransportQueues() error {
}
func (c *Collector) collectTransportQueues(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorTransportQueues.Collect()
+ err := c.perfDataCollectorTransportQueues.Collect(&c.perfDataObjectTransportQueues)
if err != nil {
return fmt.Errorf("failed to collect MSExchangeTransport Queues: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchangeTransport Queues metrics: %w", types.ErrNoData)
- }
-
- for name, data := range perfData {
- labelName := c.toLabelName(name)
+ for _, data := range c.perfDataObjectTransportQueues {
+ labelName := c.toLabelName(data.Name)
ch <- prometheus.MustNewConstMetric(
c.externalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
- data[externalActiveRemoteDeliveryQueueLength].FirstValue,
+ data.ExternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.internalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
- data[internalActiveRemoteDeliveryQueueLength].FirstValue,
+ data.InternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.activeMailboxDeliveryQueueLength,
prometheus.GaugeValue,
- data[activeMailboxDeliveryQueueLength].FirstValue,
+ data.ActiveMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.retryMailboxDeliveryQueueLength,
prometheus.GaugeValue,
- data[retryMailboxDeliveryQueueLength].FirstValue,
+ data.RetryMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.unreachableQueueLength,
prometheus.GaugeValue,
- data[unreachableQueueLength].FirstValue,
+ data.UnreachableQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.externalLargestDeliveryQueueLength,
prometheus.GaugeValue,
- data[externalLargestDeliveryQueueLength].FirstValue,
+ data.ExternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.internalLargestDeliveryQueueLength,
prometheus.GaugeValue,
- data[internalLargestDeliveryQueueLength].FirstValue,
+ data.InternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.poisonQueueLength,
prometheus.GaugeValue,
- data[poisonQueueLength].FirstValue,
+ data.PoisonQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.messagesQueuedForDeliveryTotal,
prometheus.CounterValue,
- data[messagesQueuedForDeliveryTotal].FirstValue,
+ data.MessagesQueuedForDeliveryTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.messagesSubmittedTotal,
prometheus.CounterValue,
- data[messagesSubmittedTotal].FirstValue,
+ data.MessagesSubmittedTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.messagesDelayedTotal,
prometheus.CounterValue,
- data[messagesDelayedTotal].FirstValue,
+ data.MessagesDelayedTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.messagesCompletedDeliveryTotal,
prometheus.CounterValue,
- data[messagesCompletedDeliveryTotal].FirstValue,
+ data.MessagesCompletedDeliveryTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.shadowQueueLength,
prometheus.GaugeValue,
- data[shadowQueueLength].FirstValue,
+ data.ShadowQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.submissionQueueLength,
prometheus.GaugeValue,
- data[submissionQueueLength].FirstValue,
+ data.SubmissionQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.delayQueueLength,
prometheus.GaugeValue,
- data[delayQueueLength].FirstValue,
+ data.DelayQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.itemsCompletedDeliveryTotal,
prometheus.CounterValue,
- data[itemsCompletedDeliveryTotal].FirstValue,
+ data.ItemsCompletedDeliveryTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.itemsQueuedForDeliveryExpiredTotal,
prometheus.CounterValue,
- data[itemsQueuedForDeliveryExpiredTotal].FirstValue,
+ data.ItemsQueuedForDeliveryExpiredTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.itemsQueuedForDeliveryTotal,
prometheus.CounterValue,
- data[itemsQueuedForDeliveryTotal].FirstValue,
+ data.ItemsQueuedForDeliveryTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.itemsResubmittedTotal,
prometheus.CounterValue,
- data[itemsResubmittedTotal].FirstValue,
+ data.ItemsResubmittedTotal,
labelName,
)
}
diff --git a/internal/collector/exchange/exchange_workload_management.go b/internal/collector/exchange/exchange_workload_management.go
index 5094b02d4..ccedfcdcd 100644
--- a/internal/collector/exchange/exchange_workload_management.go
+++ b/internal/collector/exchange/exchange_workload_management.go
@@ -18,31 +18,36 @@ package exchange
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- activeTasks = "ActiveTasks"
- completedTasks = "CompletedTasks"
- queuedTasks = "QueuedTasks"
- yieldedTasks = "YieldedTasks"
- isActive = "Active"
-)
+type collectorWorkloadManagementWorkloads struct {
+ perfDataCollectorWorkloadManagementWorkloads *pdh.Collector
+ perfDataObjectWorkloadManagementWorkloads []perfDataCounterValuesWorkloadManagementWorkloads
-func (c *Collector) buildWorkloadManagementWorkloads() error {
- counters := []string{
- activeTasks,
- completedTasks,
- queuedTasks,
- yieldedTasks,
- isActive,
- }
+ activeTasks *prometheus.Desc
+ isActive *prometheus.Desc
+ completedTasks *prometheus.Desc
+ queuedTasks *prometheus.Desc
+ yieldedTasks *prometheus.Desc
+}
+
+type perfDataCounterValuesWorkloadManagementWorkloads struct {
+ Name string
+ ActiveTasks float64 `perfdata:"ActiveTasks"`
+ CompletedTasks float64 `perfdata:"CompletedTasks"`
+ QueuedTasks float64 `perfdata:"QueuedTasks"`
+ YieldedTasks float64 `perfdata:"YieldedTasks"`
+ IsActive float64 `perfdata:"Active"`
+}
+
+func (c *Collector) buildWorkloadManagementWorkloads() error {
var err error
- c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector("MSExchange WorkloadManagement Workloads", perfdata.InstancesAll, counters)
+ c.perfDataCollectorWorkloadManagementWorkloads, err = pdh.NewCollector[perfDataCounterValuesWorkloadManagementWorkloads]("MSExchange WorkloadManagement Workloads", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err)
}
@@ -82,46 +87,42 @@ func (c *Collector) buildWorkloadManagementWorkloads() error {
}
func (c *Collector) collectWorkloadManagementWorkloads(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorWorkloadManagementWorkloads.Collect()
+ err := c.perfDataCollectorWorkloadManagementWorkloads.Collect(&c.perfDataObjectWorkloadManagementWorkloads)
if err != nil {
return fmt.Errorf("failed to collect MSExchange WorkloadManagement Workloads: %w", err)
}
- if len(perfData) == 0 {
- return fmt.Errorf("failed to collect MSExchange WorkloadManagement Workloads metrics: %w", types.ErrNoData)
- }
-
- for name, data := range perfData {
- labelName := c.toLabelName(name)
+ for _, data := range c.perfDataObjectWorkloadManagementWorkloads {
+ labelName := c.toLabelName(data.Name)
ch <- prometheus.MustNewConstMetric(
c.activeTasks,
prometheus.GaugeValue,
- data[activeTasks].FirstValue,
+ data.ActiveTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.completedTasks,
prometheus.CounterValue,
- data[completedTasks].FirstValue,
+ data.CompletedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.queuedTasks,
prometheus.CounterValue,
- data[queuedTasks].FirstValue,
+ data.QueuedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.yieldedTasks,
prometheus.CounterValue,
- data[yieldedTasks].FirstValue,
+ data.YieldedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.isActive,
prometheus.GaugeValue,
- data[isActive].FirstValue,
+ data.IsActive,
labelName,
)
}
diff --git a/internal/collector/hyperv/hyperv_datastore.go b/internal/collector/hyperv/hyperv_datastore.go
index d5571f535..8ea41d674 100644
--- a/internal/collector/hyperv/hyperv_datastore.go
+++ b/internal/collector/hyperv/hyperv_datastore.go
@@ -18,14 +18,15 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorDataStore Hyper-V DataStore metrics
type collectorDataStore struct {
- perfDataCollectorDataStore *perfdata.Collector
+ perfDataCollectorDataStore *pdh.Collector
+ perfDataObjectDataStore []perfDataCounterValuesDataStore
dataStoreFragmentationRatio *prometheus.Desc // \Hyper-V DataStore(*)\Fragmentation ratio
dataStoreSectorSize *prometheus.Desc // \Hyper-V DataStore(*)\Sector size
@@ -75,107 +76,61 @@ type collectorDataStore struct {
dataStoreSetOperationCount *prometheus.Desc // \Hyper-V DataStore(*)\Set operation count
}
-const (
+type perfDataCounterValuesDataStore struct {
+ Name string
// Hyper-V DataStore metrics
- dataStoreFragmentationRatio = "Fragmentation ratio"
- dataStoreSectorSize = "Sector size"
- dataStoreDataAlignment = "Data alignment"
- dataStoreCurrentReplayLogSize = "Current replay logSize"
- dataStoreAvailableEntries = "Number of available entries inside object tables"
- dataStoreEmptyEntries = "Number of empty entries inside object tables"
- dataStoreFreeBytes = "Number of free bytes inside key tables"
- dataStoreDataEnd = "Data end"
- dataStoreFileObjects = "Number of file objects"
- dataStoreObjectTables = "Number of object tables"
- dataStoreKeyTables = "Number of key tables"
- dataStoreFileDataSize = "File data size in bytes"
- dataStoreTableDataSize = "Table data size in bytes"
- dataStoreNamesSize = "Names size in bytes"
- dataStoreNumberOfKeys = "Number of keys"
- dataStoreReconnectLatencyMicro = "Reconnect latency microseconds"
- dataStoreDisconnectCount = "Disconnect count"
- dataStoreWriteToFileByteLatency = "Write to file byte latency microseconds"
- dataStoreWriteToFileByteCount = "Write to file byte count"
- dataStoreWriteToFileCount = "Write to file count"
- dataStoreReadFromFileByteLatency = "Read from file byte latency microseconds"
- dataStoreReadFromFileByteCount = "Read from file byte count"
- dataStoreReadFromFileCount = "Read from file count"
- dataStoreWriteToStorageByteLatency = "Write to storage byte latency microseconds"
- dataStoreWriteToStorageByteCount = "Write to storage byte count"
- dataStoreWriteToStorageCount = "Write to storage count"
- dataStoreReadFromStorageByteLatency = "Read from storage byte latency microseconds"
- dataStoreReadFromStorageByteCount = "Read from storage byte count"
- dataStoreReadFromStorageCount = "Read from storage count"
- dataStoreCommitByteLatency = "Commit byte latency microseconds"
- dataStoreCommitByteCount = "Commit byte count"
- dataStoreCommitCount = "Commit count"
- dataStoreCacheUpdateOperationLatency = "Cache update operation latency microseconds"
- dataStoreCacheUpdateOperationCount = "Cache update operation count"
- dataStoreCommitOperationLatency = "Commit operation latency microseconds"
- dataStoreCommitOperationCount = "Commit operation count"
- dataStoreCompactOperationLatency = "Compact operation latency microseconds"
- dataStoreCompactOperationCount = "Compact operation count"
- dataStoreLoadFileOperationLatency = "Load file operation latency microseconds"
- dataStoreLoadFileOperationCount = "Load file operation count"
- dataStoreRemoveOperationLatency = "Remove operation latency microseconds"
- dataStoreRemoveOperationCount = "Remove operation count"
- dataStoreQuerySizeOperationLatency = "Query size operation latency microseconds"
- dataStoreQuerySizeOperationCount = "Query size operation count"
- dataStoreSetOperationLatencyMicro = "Set operation latency microseconds"
- dataStoreSetOperationCount = "Set operation count"
-)
+ DataStoreFragmentationRatio float64 `perfdata:"Fragmentation ratio"`
+ DataStoreSectorSize float64 `perfdata:"Sector size"`
+ DataStoreDataAlignment float64 `perfdata:"Data alignment"`
+ DataStoreCurrentReplayLogSize float64 `perfdata:"Current replay logSize"`
+ DataStoreAvailableEntries float64 `perfdata:"Number of available entries inside object tables"`
+ DataStoreEmptyEntries float64 `perfdata:"Number of empty entries inside object tables"`
+ DataStoreFreeBytes float64 `perfdata:"Number of free bytes inside key tables"`
+ DataStoreDataEnd float64 `perfdata:"Data end"`
+ DataStoreFileObjects float64 `perfdata:"Number of file objects"`
+ DataStoreObjectTables float64 `perfdata:"Number of object tables"`
+ DataStoreKeyTables float64 `perfdata:"Number of key tables"`
+ DataStoreFileDataSize float64 `perfdata:"File data size in bytes"`
+ DataStoreTableDataSize float64 `perfdata:"Table data size in bytes"`
+ DataStoreNamesSize float64 `perfdata:"Names size in bytes"`
+ DataStoreNumberOfKeys float64 `perfdata:"Number of keys"`
+ DataStoreReconnectLatencyMicro float64 `perfdata:"Reconnect latency microseconds"`
+ DataStoreDisconnectCount float64 `perfdata:"Disconnect count"`
+ DataStoreWriteToFileByteLatency float64 `perfdata:"Write to file byte latency microseconds"`
+ DataStoreWriteToFileByteCount float64 `perfdata:"Write to file byte count"`
+ DataStoreWriteToFileCount float64 `perfdata:"Write to file count"`
+ DataStoreReadFromFileByteLatency float64 `perfdata:"Read from file byte latency microseconds"`
+ DataStoreReadFromFileByteCount float64 `perfdata:"Read from file byte count"`
+ DataStoreReadFromFileCount float64 `perfdata:"Read from file count"`
+ DataStoreWriteToStorageByteLatency float64 `perfdata:"Write to storage byte latency microseconds"`
+ DataStoreWriteToStorageByteCount float64 `perfdata:"Write to storage byte count"`
+ DataStoreWriteToStorageCount float64 `perfdata:"Write to storage count"`
+ DataStoreReadFromStorageByteLatency float64 `perfdata:"Read from storage byte latency microseconds"`
+ DataStoreReadFromStorageByteCount float64 `perfdata:"Read from storage byte count"`
+ DataStoreReadFromStorageCount float64 `perfdata:"Read from storage count"`
+ DataStoreCommitByteLatency float64 `perfdata:"Commit byte latency microseconds"`
+ DataStoreCommitByteCount float64 `perfdata:"Commit byte count"`
+ DataStoreCommitCount float64 `perfdata:"Commit count"`
+ DataStoreCacheUpdateOperationLatency float64 `perfdata:"Cache update operation latency microseconds"`
+ DataStoreCacheUpdateOperationCount float64 `perfdata:"Cache update operation count"`
+ DataStoreCommitOperationLatency float64 `perfdata:"Commit operation latency microseconds"`
+ DataStoreCommitOperationCount float64 `perfdata:"Commit operation count"`
+ DataStoreCompactOperationLatency float64 `perfdata:"Compact operation latency microseconds"`
+ DataStoreCompactOperationCount float64 `perfdata:"Compact operation count"`
+ DataStoreLoadFileOperationLatency float64 `perfdata:"Load file operation latency microseconds"`
+ DataStoreLoadFileOperationCount float64 `perfdata:"Load file operation count"`
+ DataStoreRemoveOperationLatency float64 `perfdata:"Remove operation latency microseconds"`
+ DataStoreRemoveOperationCount float64 `perfdata:"Remove operation count"`
+ DataStoreQuerySizeOperationLatency float64 `perfdata:"Query size operation latency microseconds"`
+ DataStoreQuerySizeOperationCount float64 `perfdata:"Query size operation count"`
+ DataStoreSetOperationLatencyMicro float64 `perfdata:"Set operation latency microseconds"`
+ DataStoreSetOperationCount float64 `perfdata:"Set operation count"`
+}
func (c *Collector) buildDataStore() error {
var err error
- c.perfDataCollectorDataStore, err = perfdata.NewCollector("Hyper-V DataStore", perfdata.InstancesAll, []string{
- dataStoreFragmentationRatio,
- dataStoreSectorSize,
- dataStoreDataAlignment,
- dataStoreCurrentReplayLogSize,
- dataStoreAvailableEntries,
- dataStoreEmptyEntries,
- dataStoreFreeBytes,
- dataStoreDataEnd,
- dataStoreFileObjects,
- dataStoreObjectTables,
- dataStoreKeyTables,
- dataStoreFileDataSize,
- dataStoreTableDataSize,
- dataStoreNamesSize,
- dataStoreNumberOfKeys,
- dataStoreReconnectLatencyMicro,
- dataStoreDisconnectCount,
- dataStoreWriteToFileByteLatency,
- dataStoreWriteToFileByteCount,
- dataStoreWriteToFileCount,
- dataStoreReadFromFileByteLatency,
- dataStoreReadFromFileByteCount,
- dataStoreReadFromFileCount,
- dataStoreWriteToStorageByteLatency,
- dataStoreWriteToStorageByteCount,
- dataStoreWriteToStorageCount,
- dataStoreReadFromStorageByteLatency,
- dataStoreReadFromStorageByteCount,
- dataStoreReadFromStorageCount,
- dataStoreCommitByteLatency,
- dataStoreCommitByteCount,
- dataStoreCommitCount,
- dataStoreCacheUpdateOperationLatency,
- dataStoreCacheUpdateOperationCount,
- dataStoreCommitOperationLatency,
- dataStoreCommitOperationCount,
- dataStoreCompactOperationLatency,
- dataStoreCompactOperationCount,
- dataStoreLoadFileOperationLatency,
- dataStoreLoadFileOperationCount,
- dataStoreRemoveOperationLatency,
- dataStoreRemoveOperationCount,
- dataStoreQuerySizeOperationLatency,
- dataStoreQuerySizeOperationCount,
- dataStoreSetOperationLatencyMicro,
- dataStoreSetOperationCount,
- })
+ c.perfDataCollectorDataStore, err = pdh.NewCollector[perfDataCounterValuesDataStore]("Hyper-V DataStore", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V DataStore collector: %w", err)
}
@@ -461,332 +416,332 @@ func (c *Collector) buildDataStore() error {
}
func (c *Collector) collectDataStore(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorDataStore.Collect()
+ err := c.perfDataCollectorDataStore.Collect(&c.perfDataObjectDataStore)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V DataStore metrics: %w", err)
}
- for name, page := range data {
+ for _, data := range c.perfDataObjectDataStore {
ch <- prometheus.MustNewConstMetric(
c.dataStoreFragmentationRatio,
prometheus.GaugeValue,
- page[dataStoreFragmentationRatio].FirstValue,
- name,
+ data.DataStoreFragmentationRatio,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreSectorSize,
prometheus.GaugeValue,
- page[dataStoreSectorSize].FirstValue,
- name,
+ data.DataStoreSectorSize,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreDataAlignment,
prometheus.GaugeValue,
- page[dataStoreDataAlignment].FirstValue,
- name,
+ data.DataStoreDataAlignment,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCurrentReplayLogSize,
prometheus.GaugeValue,
- page[dataStoreCurrentReplayLogSize].FirstValue,
- name,
+ data.DataStoreCurrentReplayLogSize,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreAvailableEntries,
prometheus.GaugeValue,
- page[dataStoreAvailableEntries].FirstValue,
- name,
+ data.DataStoreAvailableEntries,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreEmptyEntries,
prometheus.GaugeValue,
- page[dataStoreEmptyEntries].FirstValue,
- name,
+ data.DataStoreEmptyEntries,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreFreeBytes,
prometheus.GaugeValue,
- page[dataStoreFreeBytes].FirstValue,
- name,
+ data.DataStoreFreeBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreDataEnd,
prometheus.GaugeValue,
- page[dataStoreDataEnd].FirstValue,
- name,
+ data.DataStoreDataEnd,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreFileObjects,
prometheus.GaugeValue,
- page[dataStoreFileObjects].FirstValue,
- name,
+ data.DataStoreFileObjects,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreObjectTables,
prometheus.GaugeValue,
- page[dataStoreObjectTables].FirstValue,
- name,
+ data.DataStoreObjectTables,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreKeyTables,
prometheus.GaugeValue,
- page[dataStoreKeyTables].FirstValue,
- name,
+ data.DataStoreKeyTables,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreFileDataSize,
prometheus.GaugeValue,
- page[dataStoreFileDataSize].FirstValue,
- name,
+ data.DataStoreFileDataSize,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreTableDataSize,
prometheus.GaugeValue,
- page[dataStoreTableDataSize].FirstValue,
- name,
+ data.DataStoreTableDataSize,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreNamesSize,
prometheus.GaugeValue,
- page[dataStoreNamesSize].FirstValue,
- name,
+ data.DataStoreNamesSize,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreNumberOfKeys,
prometheus.GaugeValue,
- page[dataStoreNumberOfKeys].FirstValue,
- name,
+ data.DataStoreNumberOfKeys,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreReconnectLatencyMicro,
prometheus.GaugeValue,
- page[dataStoreReconnectLatencyMicro].FirstValue,
- name,
+ data.DataStoreReconnectLatencyMicro,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreDisconnectCount,
prometheus.CounterValue,
- page[dataStoreDisconnectCount].FirstValue,
- name,
+ data.DataStoreDisconnectCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreWriteToFileByteLatency,
prometheus.GaugeValue,
- page[dataStoreWriteToFileByteLatency].FirstValue,
- name,
+ data.DataStoreWriteToFileByteLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreWriteToFileByteCount,
prometheus.CounterValue,
- page[dataStoreWriteToFileByteCount].FirstValue,
- name,
+ data.DataStoreWriteToFileByteCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreWriteToFileCount,
prometheus.CounterValue,
- page[dataStoreWriteToFileCount].FirstValue,
- name,
+ data.DataStoreWriteToFileCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreReadFromFileByteLatency,
prometheus.GaugeValue,
- page[dataStoreReadFromFileByteLatency].FirstValue,
- name,
+ data.DataStoreReadFromFileByteLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreReadFromFileByteCount,
prometheus.CounterValue,
- page[dataStoreReadFromFileByteCount].FirstValue,
- name,
+ data.DataStoreReadFromFileByteCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreReadFromFileCount,
prometheus.CounterValue,
- page[dataStoreReadFromFileCount].FirstValue,
- name,
+ data.DataStoreReadFromFileCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreWriteToStorageByteLatency,
prometheus.GaugeValue,
- page[dataStoreWriteToStorageByteLatency].FirstValue,
- name,
+ data.DataStoreWriteToStorageByteLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreWriteToStorageByteCount,
prometheus.CounterValue,
- page[dataStoreWriteToStorageByteCount].FirstValue,
- name,
+ data.DataStoreWriteToStorageByteCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreWriteToStorageCount,
prometheus.CounterValue,
- page[dataStoreWriteToStorageCount].FirstValue,
- name,
+ data.DataStoreWriteToStorageCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreReadFromStorageByteLatency,
prometheus.GaugeValue,
- page[dataStoreReadFromStorageByteLatency].FirstValue,
- name,
+ data.DataStoreReadFromStorageByteLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreReadFromStorageByteCount,
prometheus.CounterValue,
- page[dataStoreReadFromStorageByteCount].FirstValue,
- name,
+ data.DataStoreReadFromStorageByteCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreReadFromStorageCount,
prometheus.CounterValue,
- page[dataStoreReadFromStorageCount].FirstValue,
- name,
+ data.DataStoreReadFromStorageCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCommitByteLatency,
prometheus.GaugeValue,
- page[dataStoreCommitByteLatency].FirstValue,
- name,
+ data.DataStoreCommitByteLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCommitByteCount,
prometheus.CounterValue,
- page[dataStoreCommitByteCount].FirstValue,
- name,
+ data.DataStoreCommitByteCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCommitCount,
prometheus.CounterValue,
- page[dataStoreCommitCount].FirstValue,
- name,
+ data.DataStoreCommitCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCacheUpdateOperationLatency,
prometheus.GaugeValue,
- page[dataStoreCacheUpdateOperationLatency].FirstValue,
- name,
+ data.DataStoreCacheUpdateOperationLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCacheUpdateOperationCount,
prometheus.CounterValue,
- page[dataStoreCacheUpdateOperationCount].FirstValue,
- name,
+ data.DataStoreCacheUpdateOperationCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCommitOperationLatency,
prometheus.GaugeValue,
- page[dataStoreCommitOperationLatency].FirstValue,
- name,
+ data.DataStoreCommitOperationLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCommitOperationCount,
prometheus.CounterValue,
- page[dataStoreCommitOperationCount].FirstValue,
- name,
+ data.DataStoreCommitOperationCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCompactOperationLatency,
prometheus.GaugeValue,
- page[dataStoreCompactOperationLatency].FirstValue,
- name,
+ data.DataStoreCompactOperationLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreCompactOperationCount,
prometheus.CounterValue,
- page[dataStoreCompactOperationCount].FirstValue,
- name,
+ data.DataStoreCompactOperationCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreLoadFileOperationLatency,
prometheus.GaugeValue,
- page[dataStoreLoadFileOperationLatency].FirstValue,
- name,
+ data.DataStoreLoadFileOperationLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreLoadFileOperationCount,
prometheus.CounterValue,
- page[dataStoreLoadFileOperationCount].FirstValue,
- name,
+ data.DataStoreLoadFileOperationCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreRemoveOperationLatency,
prometheus.GaugeValue,
- page[dataStoreRemoveOperationLatency].FirstValue,
- name,
+ data.DataStoreRemoveOperationLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreRemoveOperationCount,
prometheus.CounterValue,
- page[dataStoreRemoveOperationCount].FirstValue,
- name,
+ data.DataStoreRemoveOperationCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreQuerySizeOperationLatency,
prometheus.GaugeValue,
- page[dataStoreQuerySizeOperationLatency].FirstValue,
- name,
+ data.DataStoreQuerySizeOperationLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreQuerySizeOperationCount,
prometheus.CounterValue,
- page[dataStoreQuerySizeOperationCount].FirstValue,
- name,
+ data.DataStoreQuerySizeOperationCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreSetOperationLatencyMicro,
prometheus.GaugeValue,
- page[dataStoreSetOperationLatencyMicro].FirstValue,
- name,
+ data.DataStoreSetOperationLatencyMicro,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dataStoreSetOperationCount,
prometheus.CounterValue,
- page[dataStoreSetOperationCount].FirstValue,
- name,
+ data.DataStoreSetOperationCount,
+ data.Name,
)
}
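
The Hyper-V sub-collectors in this diff are all migrated the same way: the per-counter string constants become fields of a struct whose perfdata tags name the counters, pdh.NewCollector is instantiated with that struct type, and Collect fills a reusable slice with one element per counter instance. A minimal sketch of the resulting usage, with the pdh signatures inferred from the calls in this diff rather than from the package itself (and assuming the snippet lives inside the exporter module, since internal packages cannot be imported elsewhere):

	package hyperv

	import (
		"fmt"

		"github.com/prometheus-community/windows_exporter/internal/pdh"
	)

	// exampleCounterValues mirrors the tagged structs above: each `perfdata`
	// tag names a counter of the "Hyper-V DataStore" object, and Name receives
	// the instance name.
	type exampleCounterValues struct {
		Name string

		CommitCount float64 `perfdata:"Commit count"`
	}

	func exampleCollect() error {
		collector, err := pdh.NewCollector[exampleCounterValues]("Hyper-V DataStore", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create example collector: %w", err)
		}

		// Collect fills the caller-owned slice with one element per instance
		// (behavior inferred from how the collectors above reuse c.perfDataObject...).
		var values []exampleCounterValues
		if err := collector.Collect(&values); err != nil {
			return fmt.Errorf("failed to collect example metrics: %w", err)
		}

		for _, v := range values {
			fmt.Printf("%s: %.0f commits\n", v.Name, v.CommitCount)
		}

		return nil
	}
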
diff --git a/internal/collector/hyperv/hyperv_dynamic_memory_balancer.go b/internal/collector/hyperv/hyperv_dynamic_memory_balancer.go
index 86eb92824..fb02f4d5a 100644
--- a/internal/collector/hyperv/hyperv_dynamic_memory_balancer.go
+++ b/internal/collector/hyperv/hyperv_dynamic_memory_balancer.go
@@ -18,7 +18,7 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@@ -26,31 +26,30 @@ import (
// collectorDynamicMemoryBalancer Hyper-V Dynamic Memory Balancer metrics
type collectorDynamicMemoryBalancer struct {
- perfDataCollectorDynamicMemoryBalancer *perfdata.Collector
+ perfDataCollectorDynamicMemoryBalancer *pdh.Collector
+ perfDataObjectDynamicMemoryBalancer []perfDataCounterValuesDynamicMemoryBalancer
+
vmDynamicMemoryBalancerAvailableMemoryForBalancing *prometheus.Desc // \Hyper-V Dynamic Memory Balancer(*)\Available Memory For Balancing
vmDynamicMemoryBalancerSystemCurrentPressure *prometheus.Desc // \Hyper-V Dynamic Memory Balancer(*)\System Current Pressure
vmDynamicMemoryBalancerAvailableMemory *prometheus.Desc // \Hyper-V Dynamic Memory Balancer(*)\Available Memory
vmDynamicMemoryBalancerAveragePressure *prometheus.Desc // \Hyper-V Dynamic Memory Balancer(*)\Average Pressure
}
-const (
+type perfDataCounterValuesDynamicMemoryBalancer struct {
+ Name string
+
// Hyper-V Dynamic Memory Balancer metrics
- vmDynamicMemoryBalancerAvailableMemory = "Available Memory"
- vmDynamicMemoryBalancerAvailableMemoryForBalancing = "Available Memory For Balancing"
- vmDynamicMemoryBalancerAveragePressure = "Average Pressure"
- vmDynamicMemoryBalancerSystemCurrentPressure = "System Current Pressure"
-)
+ VmDynamicMemoryBalancerAvailableMemory float64 `perfdata:"Available Memory"`
+ VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing"`
+ VmDynamicMemoryBalancerAveragePressure float64 `perfdata:"Average Pressure"`
+ VmDynamicMemoryBalancerSystemCurrentPressure float64 `perfdata:"System Current Pressure"`
+}
func (c *Collector) buildDynamicMemoryBalancer() error {
var err error
// https://learn.microsoft.com/en-us/archive/blogs/chrisavis/monitoring-dynamic-memory-in-windows-server-hyper-v-2012
- c.perfDataCollectorDynamicMemoryBalancer, err = perfdata.NewCollector("Hyper-V Dynamic Memory Balancer", perfdata.InstancesAll, []string{
- vmDynamicMemoryBalancerAvailableMemory,
- vmDynamicMemoryBalancerAvailableMemoryForBalancing,
- vmDynamicMemoryBalancerAveragePressure,
- vmDynamicMemoryBalancerSystemCurrentPressure,
- })
+ c.perfDataCollectorDynamicMemoryBalancer, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryBalancer]("Hyper-V Dynamic Memory Balancer", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Machine Health Summary collector: %w", err)
}
@@ -84,38 +83,38 @@ func (c *Collector) buildDynamicMemoryBalancer() error {
}
func (c *Collector) collectDynamicMemoryBalancer(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorDynamicMemoryBalancer.Collect()
+ err := c.perfDataCollectorDynamicMemoryBalancer.Collect(&c.perfDataObjectDynamicMemoryBalancer)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Dynamic Memory Balancer metrics: %w", err)
}
- for name, page := range data {
+ for _, data := range c.perfDataObjectDynamicMemoryBalancer {
ch <- prometheus.MustNewConstMetric(
c.vmDynamicMemoryBalancerAvailableMemory,
prometheus.GaugeValue,
- utils.MBToBytes(page[vmDynamicMemoryBalancerAvailableMemory].FirstValue),
- name,
+ utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemory),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmDynamicMemoryBalancerAvailableMemoryForBalancing,
prometheus.GaugeValue,
- utils.MBToBytes(page[vmDynamicMemoryBalancerAvailableMemoryForBalancing].FirstValue),
- name,
+ utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmDynamicMemoryBalancerAveragePressure,
prometheus.GaugeValue,
- utils.PercentageToRatio(page[vmDynamicMemoryBalancerAveragePressure].FirstValue),
- name,
+ utils.PercentageToRatio(data.VmDynamicMemoryBalancerAveragePressure),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmDynamicMemoryBalancerSystemCurrentPressure,
prometheus.GaugeValue,
- utils.PercentageToRatio(page[vmDynamicMemoryBalancerSystemCurrentPressure].FirstValue),
- name,
+ utils.PercentageToRatio(data.VmDynamicMemoryBalancerSystemCurrentPressure),
+ data.Name,
)
}
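
The dynamic-memory collectors (above, and in the next file) also convert the raw counter units on the way out: Windows reports these memory counters in megabytes and the pressure counters in percent, and utils.MBToBytes / utils.PercentageToRatio turn them into bytes and 0-1 ratios before the values reach Prometheus. The utils package is not part of this diff; assuming the obvious implementations, the helpers amount to:

	// Assumed, not taken from the utils package itself.
	func MBToBytes(mb float64) float64 { return mb * 1024 * 1024 }

	func PercentageToRatio(percent float64) float64 { return percent / 100 }
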
diff --git a/internal/collector/hyperv/hyperv_dynamic_memory_vm.go b/internal/collector/hyperv/hyperv_dynamic_memory_vm.go
index 4c45cde77..665a846a0 100644
--- a/internal/collector/hyperv/hyperv_dynamic_memory_vm.go
+++ b/internal/collector/hyperv/hyperv_dynamic_memory_vm.go
@@ -18,7 +18,7 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@@ -26,7 +26,9 @@ import (
// collectorDynamicMemoryVM Hyper-V Dynamic Memory VM metrics
type collectorDynamicMemoryVM struct {
- perfDataCollectorDynamicMemoryVM *perfdata.Collector
+ perfDataCollectorDynamicMemoryVM *pdh.Collector
+ perfDataObjectDynamicMemoryVM []perfDataCounterValuesDynamicMemoryVM
+
vmMemoryAddedMemory *prometheus.Desc // \Hyper-V Dynamic Memory VM(*)\Added Memory
vmMemoryCurrentPressure *prometheus.Desc // \Hyper-V Dynamic Memory VM(*)\Current Pressure
vmMemoryGuestVisiblePhysicalMemory *prometheus.Desc // \Hyper-V Dynamic Memory VM(*)\Guest Visible Physical Memory
@@ -39,35 +41,26 @@ type collectorDynamicMemoryVM struct {
vmMemoryGuestAvailableMemory *prometheus.Desc // \Hyper-V Dynamic Memory VM(*)\Guest Available Memory
}
-const (
+type perfDataCounterValuesDynamicMemoryVM struct {
+ Name string
+
// Hyper-V Dynamic Memory VM metrics
- vmMemoryAddedMemory = "Added Memory"
- vmMemoryCurrentPressure = "Current Pressure"
- vmMemoryGuestAvailableMemory = "Guest Available Memory"
- vmMemoryGuestVisiblePhysicalMemory = "Guest Visible Physical Memory"
- vmMemoryMaximumPressure = "Maximum Pressure"
- vmMemoryMemoryAddOperations = "Memory Add Operations"
- vmMemoryMemoryRemoveOperations = "Memory Remove Operations"
- vmMemoryMinimumPressure = "Minimum Pressure"
- vmMemoryPhysicalMemory = "Physical Memory"
- vmMemoryRemovedMemory = "Removed Memory"
-)
+ VmMemoryAddedMemory float64 `perfdata:"Added Memory"`
+ VmMemoryCurrentPressure float64 `perfdata:"Current Pressure"`
+ VmMemoryGuestAvailableMemory float64 `perfdata:"Guest Available Memory"`
+ VmMemoryGuestVisiblePhysicalMemory float64 `perfdata:"Guest Visible Physical Memory"`
+ VmMemoryMaximumPressure float64 `perfdata:"Maximum Pressure"`
+ VmMemoryMemoryAddOperations float64 `perfdata:"Memory Add Operations"`
+ VmMemoryMemoryRemoveOperations float64 `perfdata:"Memory Remove Operations"`
+ VmMemoryMinimumPressure float64 `perfdata:"Minimum Pressure"`
+ VmMemoryPhysicalMemory float64 `perfdata:"Physical Memory"`
+ VmMemoryRemovedMemory float64 `perfdata:"Removed Memory"`
+}
func (c *Collector) buildDynamicMemoryVM() error {
var err error
- c.perfDataCollectorDynamicMemoryVM, err = perfdata.NewCollector("Hyper-V Dynamic Memory VM", perfdata.InstancesAll, []string{
- vmMemoryAddedMemory,
- vmMemoryCurrentPressure,
- vmMemoryGuestVisiblePhysicalMemory,
- vmMemoryMaximumPressure,
- vmMemoryMemoryAddOperations,
- vmMemoryMemoryRemoveOperations,
- vmMemoryMinimumPressure,
- vmMemoryPhysicalMemory,
- vmMemoryRemovedMemory,
- vmMemoryGuestAvailableMemory,
- })
+ c.perfDataCollectorDynamicMemoryVM, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryVM]("Hyper-V Dynamic Memory VM", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Dynamic Memory VM collector: %w", err)
}
@@ -137,80 +130,80 @@ func (c *Collector) buildDynamicMemoryVM() error {
}
func (c *Collector) collectDynamicMemoryVM(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorDynamicMemoryVM.Collect()
+ err := c.perfDataCollectorDynamicMemoryVM.Collect(&c.perfDataObjectDynamicMemoryVM)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Dynamic Memory VM metrics: %w", err)
}
- for vmName, vmData := range data {
+ for _, data := range c.perfDataObjectDynamicMemoryVM {
ch <- prometheus.MustNewConstMetric(
c.vmMemoryAddedMemory,
prometheus.CounterValue,
- utils.MBToBytes(vmData[vmMemoryAddedMemory].FirstValue),
- vmName,
+ utils.MBToBytes(data.VmMemoryAddedMemory),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryCurrentPressure,
prometheus.GaugeValue,
- utils.PercentageToRatio(vmData[vmMemoryCurrentPressure].FirstValue),
- vmName,
+ utils.PercentageToRatio(data.VmMemoryCurrentPressure),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryGuestAvailableMemory,
prometheus.GaugeValue,
- utils.MBToBytes(vmData[vmMemoryGuestAvailableMemory].FirstValue),
- vmName,
+ utils.MBToBytes(data.VmMemoryGuestAvailableMemory),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryGuestVisiblePhysicalMemory,
prometheus.GaugeValue,
- utils.MBToBytes(vmData[vmMemoryGuestVisiblePhysicalMemory].FirstValue),
- vmName,
+ utils.MBToBytes(data.VmMemoryGuestVisiblePhysicalMemory),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryMaximumPressure,
prometheus.GaugeValue,
- utils.PercentageToRatio(vmData[vmMemoryMaximumPressure].FirstValue),
- vmName,
+ utils.PercentageToRatio(data.VmMemoryMaximumPressure),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryMemoryAddOperations,
prometheus.CounterValue,
- vmData[vmMemoryMemoryAddOperations].FirstValue,
- vmName,
+ data.VmMemoryMemoryAddOperations,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryMemoryRemoveOperations,
prometheus.CounterValue,
- vmData[vmMemoryMemoryRemoveOperations].FirstValue,
- vmName,
+ data.VmMemoryMemoryRemoveOperations,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryMinimumPressure,
prometheus.GaugeValue,
- utils.PercentageToRatio(vmData[vmMemoryMinimumPressure].FirstValue),
- vmName,
+ utils.PercentageToRatio(data.VmMemoryMinimumPressure),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryPhysicalMemory,
prometheus.GaugeValue,
- utils.MBToBytes(vmData[vmMemoryPhysicalMemory].FirstValue),
- vmName,
+ utils.MBToBytes(data.VmMemoryPhysicalMemory),
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryRemovedMemory,
prometheus.CounterValue,
- utils.MBToBytes(vmData[vmMemoryRemovedMemory].FirstValue),
- vmName,
+ utils.MBToBytes(data.VmMemoryRemovedMemory),
+ data.Name,
)
}
diff --git a/internal/collector/hyperv/hyperv_hypervisor_logical_processor.go b/internal/collector/hyperv/hyperv_hypervisor_logical_processor.go
index 4d93cfd14..3fd60ad5f 100644
--- a/internal/collector/hyperv/hyperv_hypervisor_logical_processor.go
+++ b/internal/collector/hyperv/hyperv_hypervisor_logical_processor.go
@@ -19,14 +19,15 @@ import (
"fmt"
"strings"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorHypervisorLogicalProcessor Hyper-V Hypervisor Logical Processor metrics
type collectorHypervisorLogicalProcessor struct {
- perfDataCollectorHypervisorLogicalProcessor *perfdata.Collector
+ perfDataCollectorHypervisorLogicalProcessor *pdh.Collector
+ perfDataObjectHypervisorLogicalProcessor []perfDataCounterValuesHypervisorLogicalProcessor
// \Hyper-V Hypervisor Logical Processor(*)\% Guest Run Time
// \Hyper-V Hypervisor Logical Processor(*)\% Hypervisor Run Time
@@ -36,24 +37,20 @@ type collectorHypervisorLogicalProcessor struct {
hypervisorLogicalProcessorContextSwitches *prometheus.Desc // \Hyper-V Hypervisor Logical Processor(*)\Context Switches/sec
}
-const (
- hypervisorLogicalProcessorGuestRunTimePercent = "% Guest Run Time"
- hypervisorLogicalProcessorHypervisorRunTimePercent = "% Hypervisor Run Time"
- hypervisorLogicalProcessorTotalRunTimePercent = "% Total Run Time"
- hypervisorLogicalProcessorIdleRunTimePercent = "% Idle Time"
- hypervisorLogicalProcessorContextSwitches = "Context Switches/sec"
-)
+type perfDataCounterValuesHypervisorLogicalProcessor struct {
+ Name string
+
+ HypervisorLogicalProcessorGuestRunTimePercent float64 `perfdata:"% Guest Run Time"`
+ HypervisorLogicalProcessorHypervisorRunTimePercent float64 `perfdata:"% Hypervisor Run Time"`
+ HypervisorLogicalProcessorTotalRunTimePercent float64 `perfdata:"% Total Run Time"`
+ HypervisorLogicalProcessorIdleRunTimePercent float64 `perfdata:"% Idle Time"`
+ HypervisorLogicalProcessorContextSwitches float64 `perfdata:"Context Switches/sec"`
+}
func (c *Collector) buildHypervisorLogicalProcessor() error {
var err error
- c.perfDataCollectorHypervisorLogicalProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Logical Processor", perfdata.InstancesAll, []string{
- hypervisorLogicalProcessorGuestRunTimePercent,
- hypervisorLogicalProcessorHypervisorRunTimePercent,
- hypervisorLogicalProcessorTotalRunTimePercent,
- hypervisorLogicalProcessorIdleRunTimePercent,
- hypervisorLogicalProcessorContextSwitches,
- })
+ c.perfDataCollectorHypervisorLogicalProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorLogicalProcessor]("Hyper-V Hypervisor Logical Processor", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Logical Processor collector: %w", err)
}
@@ -82,53 +79,53 @@ func (c *Collector) buildHypervisorLogicalProcessor() error {
}
func (c *Collector) collectHypervisorLogicalProcessor(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorHypervisorLogicalProcessor.Collect()
+ err := c.perfDataCollectorHypervisorLogicalProcessor.Collect(&c.perfDataObjectHypervisorLogicalProcessor)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Hypervisor Logical Processor metrics: %w", err)
}
- for coreName, coreData := range data {
+ for _, data := range c.perfDataObjectHypervisorLogicalProcessor {
  // The name format is Hv LP <core id>
- parts := strings.Split(coreName, " ")
+ parts := strings.Split(data.Name, " ")
if len(parts) != 3 {
- return fmt.Errorf("unexpected Hyper-V Hypervisor Logical Processor name format: %s", coreName)
+ return fmt.Errorf("unexpected Hyper-V Hypervisor Logical Processor name format: %s", data.Name)
}
- coreId := parts[2]
+ coreID := parts[2]
ch <- prometheus.MustNewConstMetric(
c.hypervisorLogicalProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorLogicalProcessorGuestRunTimePercent].FirstValue,
- coreId, "guest",
+ data.HypervisorLogicalProcessorGuestRunTimePercent,
+ coreID, "guest",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorLogicalProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorLogicalProcessorHypervisorRunTimePercent].FirstValue,
- coreId, "hypervisor",
+ data.HypervisorLogicalProcessorHypervisorRunTimePercent,
+ coreID, "hypervisor",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorLogicalProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorLogicalProcessorIdleRunTimePercent].FirstValue,
- coreId, "idle",
+ data.HypervisorLogicalProcessorIdleRunTimePercent,
+ coreID, "idle",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorLogicalProcessorTotalRunTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorLogicalProcessorTotalRunTimePercent].FirstValue,
- coreId,
+ data.HypervisorLogicalProcessorTotalRunTimePercent,
+ coreID,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorLogicalProcessorContextSwitches,
prometheus.CounterValue,
- coreData[hypervisorLogicalProcessorContextSwitches].FirstValue,
- coreId,
+ data.HypervisorLogicalProcessorContextSwitches,
+ coreID,
)
}
diff --git a/internal/collector/hyperv/hyperv_hypervisor_root_partition.go b/internal/collector/hyperv/hyperv_hypervisor_root_partition.go
index 2b5f07645..c8d5290e8 100644
--- a/internal/collector/hyperv/hyperv_hypervisor_root_partition.go
+++ b/internal/collector/hyperv/hyperv_hypervisor_root_partition.go
@@ -16,17 +16,18 @@
package hyperv
import (
- "errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorHypervisorRootPartition Hyper-V Hypervisor Root Partition metrics
type collectorHypervisorRootPartition struct {
- perfDataCollectorHypervisorRootPartition *perfdata.Collector
+ perfDataCollectorHypervisorRootPartition *pdh.Collector
+ perfDataObjectHypervisorRootPartition []perfDataCounterValuesHypervisorRootPartition
+
hypervisorRootPartitionAddressSpaces *prometheus.Desc // \Hyper-V Hypervisor Root Partition(*)\Address Spaces
hypervisorRootPartitionAttachedDevices *prometheus.Desc // \Hyper-V Hypervisor Root Partition(*)\Attached Devices
hypervisorRootPartitionDepositedPages *prometheus.Desc // \Hyper-V Hypervisor Root Partition(*)\Deposited Pages
@@ -50,56 +51,34 @@ type collectorHypervisorRootPartition struct {
hypervisorRootPartitionVirtualTLBPages *prometheus.Desc // \Hyper-V Hypervisor Root Partition(*)\Virtual TLB Pages
}
-const (
- hypervisorRootPartitionAddressSpaces = "Address Spaces"
- hypervisorRootPartitionAttachedDevices = "Attached Devices"
- hypervisorRootPartitionDepositedPages = "Deposited Pages"
- hypervisorRootPartitionDeviceDMAErrors = "Device DMA Errors"
- hypervisorRootPartitionDeviceInterruptErrors = "Device Interrupt Errors"
- hypervisorRootPartitionDeviceInterruptMappings = "Device Interrupt Mappings"
- hypervisorRootPartitionDeviceInterruptThrottleEvents = "Device Interrupt Throttle Events"
- hypervisorRootPartitionGPAPages = "GPA Pages"
- hypervisorRootPartitionGPASpaceModifications = "GPA Space Modifications/sec"
- hypervisorRootPartitionIOTLBFlushCost = "I/O TLB Flush Cost"
- hypervisorRootPartitionIOTLBFlushes = "I/O TLB Flushes/sec"
- hypervisorRootPartitionRecommendedVirtualTLBSize = "Recommended Virtual TLB Size"
- hypervisorRootPartitionSkippedTimerTicks = "Skipped Timer Ticks"
- hypervisorRootPartition1GDevicePages = "1G device pages"
- hypervisorRootPartition1GGPAPages = "1G GPA pages"
- hypervisorRootPartition2MDevicePages = "2M device pages"
- hypervisorRootPartition2MGPAPages = "2M GPA pages"
- hypervisorRootPartition4KDevicePages = "4K device pages"
- hypervisorRootPartition4KGPAPages = "4K GPA pages"
- hypervisorRootPartitionVirtualTLBFlushEntries = "Virtual TLB Flush Entires/sec"
- hypervisorRootPartitionVirtualTLBPages = "Virtual TLB Pages"
-)
+type perfDataCounterValuesHypervisorRootPartition struct {
+ HypervisorRootPartitionAddressSpaces float64 `perfdata:"Address Spaces"`
+ HypervisorRootPartitionAttachedDevices float64 `perfdata:"Attached Devices"`
+ HypervisorRootPartitionDepositedPages float64 `perfdata:"Deposited Pages"`
+ HypervisorRootPartitionDeviceDMAErrors float64 `perfdata:"Device DMA Errors"`
+ HypervisorRootPartitionDeviceInterruptErrors float64 `perfdata:"Device Interrupt Errors"`
+ HypervisorRootPartitionDeviceInterruptMappings float64 `perfdata:"Device Interrupt Mappings"`
+ HypervisorRootPartitionDeviceInterruptThrottleEvents float64 `perfdata:"Device Interrupt Throttle Events"`
+ HypervisorRootPartitionGPAPages float64 `perfdata:"GPA Pages"`
+ HypervisorRootPartitionGPASpaceModifications float64 `perfdata:"GPA Space Modifications/sec"`
+ HypervisorRootPartitionIOTLBFlushCost float64 `perfdata:"I/O TLB Flush Cost"`
+ HypervisorRootPartitionIOTLBFlushes float64 `perfdata:"I/O TLB Flushes/sec"`
+ HypervisorRootPartitionRecommendedVirtualTLBSize float64 `perfdata:"Recommended Virtual TLB Size"`
+ HypervisorRootPartitionSkippedTimerTicks float64 `perfdata:"Skipped Timer Ticks"`
+ HypervisorRootPartition1GDevicePages float64 `perfdata:"1G device pages"`
+ HypervisorRootPartition1GGPAPages float64 `perfdata:"1G GPA pages"`
+ HypervisorRootPartition2MDevicePages float64 `perfdata:"2M device pages"`
+ HypervisorRootPartition2MGPAPages float64 `perfdata:"2M GPA pages"`
+ HypervisorRootPartition4KDevicePages float64 `perfdata:"4K device pages"`
+ HypervisorRootPartition4KGPAPages float64 `perfdata:"4K GPA pages"`
+ HypervisorRootPartitionVirtualTLBFlushEntries float64 `perfdata:"Virtual TLB Flush Entires/sec"`
+ HypervisorRootPartitionVirtualTLBPages float64 `perfdata:"Virtual TLB Pages"`
+}
func (c *Collector) buildHypervisorRootPartition() error {
var err error
- c.perfDataCollectorHypervisorRootPartition, err = perfdata.NewCollector("Hyper-V Hypervisor Root Partition", []string{"Root"}, []string{
- hypervisorRootPartitionAddressSpaces,
- hypervisorRootPartitionAttachedDevices,
- hypervisorRootPartitionDepositedPages,
- hypervisorRootPartitionDeviceDMAErrors,
- hypervisorRootPartitionDeviceInterruptErrors,
- hypervisorRootPartitionDeviceInterruptMappings,
- hypervisorRootPartitionDeviceInterruptThrottleEvents,
- hypervisorRootPartitionGPAPages,
- hypervisorRootPartitionGPASpaceModifications,
- hypervisorRootPartitionIOTLBFlushCost,
- hypervisorRootPartitionIOTLBFlushes,
- hypervisorRootPartitionRecommendedVirtualTLBSize,
- hypervisorRootPartitionSkippedTimerTicks,
- hypervisorRootPartition1GDevicePages,
- hypervisorRootPartition1GGPAPages,
- hypervisorRootPartition2MDevicePages,
- hypervisorRootPartition2MGPAPages,
- hypervisorRootPartition4KDevicePages,
- hypervisorRootPartition4KGPAPages,
- hypervisorRootPartitionVirtualTLBFlushEntries,
- hypervisorRootPartitionVirtualTLBPages,
- })
+ c.perfDataCollectorHypervisorRootPartition, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootPartition]("Hyper-V Hypervisor Root Partition", []string{"Root"})
if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Root Partition collector: %w", err)
}
@@ -235,129 +214,124 @@ func (c *Collector) buildHypervisorRootPartition() error {
}
func (c *Collector) collectHypervisorRootPartition(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorHypervisorRootPartition.Collect()
+ err := c.perfDataCollectorHypervisorRootPartition.Collect(&c.perfDataObjectHypervisorRootPartition)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Hypervisor Root Partition metrics: %w", err)
}
- rootData, ok := data["Root"]
- if !ok {
- return errors.New("no data returned from Hyper-V Hypervisor Root Partition")
- }
-
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionAddressSpaces,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionAddressSpaces].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionAddressSpaces,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionAttachedDevices,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionAttachedDevices].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionAttachedDevices,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionDepositedPages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionDepositedPages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionDepositedPages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionDeviceDMAErrors,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionDeviceDMAErrors].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionDeviceDMAErrors,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionDeviceInterruptErrors,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionDeviceInterruptErrors].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionDeviceInterruptErrors,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionDeviceInterruptThrottleEvents,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionDeviceInterruptThrottleEvents].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionDeviceInterruptThrottleEvents,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionGPAPages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionGPAPages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionGPAPages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionGPASpaceModifications,
prometheus.CounterValue,
- rootData[hypervisorRootPartitionGPASpaceModifications].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionGPASpaceModifications,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionIOTLBFlushCost,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionIOTLBFlushCost].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionIOTLBFlushCost,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionIOTLBFlushes,
prometheus.CounterValue,
- rootData[hypervisorRootPartitionIOTLBFlushes].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionIOTLBFlushes,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionRecommendedVirtualTLBSize,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionRecommendedVirtualTLBSize].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionRecommendedVirtualTLBSize,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionSkippedTimerTicks,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionSkippedTimerTicks].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionSkippedTimerTicks,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition1GDevicePages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartition1GDevicePages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition1GDevicePages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition1GGPAPages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartition1GGPAPages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition1GGPAPages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition2MDevicePages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartition2MDevicePages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition2MDevicePages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition2MGPAPages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartition2MGPAPages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition2MGPAPages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition4KDevicePages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartition4KDevicePages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition4KDevicePages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition4KGPAPages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartition4KGPAPages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition4KGPAPages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionVirtualTLBFlushEntries,
prometheus.CounterValue,
- rootData[hypervisorRootPartitionVirtualTLBFlushEntries].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionVirtualTLBFlushEntries,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionVirtualTLBPages,
prometheus.GaugeValue,
- rootData[hypervisorRootPartitionVirtualTLBPages].FirstValue,
+ c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionVirtualTLBPages,
)
return nil
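
Note that the root-partition collector above (and the health-summary collector later in this diff) now indexes element 0 of the result slice directly; the old "no data returned" check was dropped together with the errors import. If pdh.Collector.Collect can return successfully with an empty slice — this diff does not show its behavior either way — a short guard would preserve the old failure mode instead of an index-out-of-range panic. A hypothetical sketch (it would need the removed errors import back):

	if len(c.perfDataObjectHypervisorRootPartition) == 0 {
		return errors.New("no data returned from Hyper-V Hypervisor Root Partition")
	}
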
diff --git a/internal/collector/hyperv/hyperv_hypervisor_root_virtual_processor.go b/internal/collector/hyperv/hyperv_hypervisor_root_virtual_processor.go
index dbaf01688..8ef5983d5 100644
--- a/internal/collector/hyperv/hyperv_hypervisor_root_virtual_processor.go
+++ b/internal/collector/hyperv/hyperv_hypervisor_root_virtual_processor.go
@@ -19,14 +19,15 @@ import (
"fmt"
"strings"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorHypervisorRootVirtualProcessor Hyper-V Hypervisor Root Virtual Processor metrics
type collectorHypervisorRootVirtualProcessor struct {
- perfDataCollectorHypervisorRootVirtualProcessor *perfdata.Collector
+ perfDataCollectorHypervisorRootVirtualProcessor *pdh.Collector
+ perfDataObjectHypervisorRootVirtualProcessor []perfDataCounterValuesHypervisorRootVirtualProcessor
// \Hyper-V Hypervisor Root Virtual Processor(*)\% Guest Run Time
// \Hyper-V Hypervisor Root Virtual Processor(*)\% Hypervisor Run Time
@@ -37,24 +38,20 @@ type collectorHypervisorRootVirtualProcessor struct {
hypervisorRootVirtualProcessorCPUWaitTimePerDispatch *prometheus.Desc // \Hyper-V Hypervisor Root Virtual Processor(*)\CPU Wait Time Per Dispatch
}
-const (
- hypervisorRootVirtualProcessorGuestRunTimePercent = "% Guest Run Time"
- hypervisorRootVirtualProcessorHypervisorRunTimePercent = "% Hypervisor Run Time"
- hypervisorRootVirtualProcessorTotalRunTimePercent = "% Total Run Time"
- hypervisorRootVirtualProcessorRemoteRunTimePercent = "% Remote Run Time"
- hypervisorRootVirtualProcessorCPUWaitTimePerDispatch = "CPU Wait Time Per Dispatch"
-)
+type perfDataCounterValuesHypervisorRootVirtualProcessor struct {
+ Name string
+
+ HypervisorRootVirtualProcessorGuestRunTimePercent float64 `perfdata:"% Guest Run Time"`
+ HypervisorRootVirtualProcessorHypervisorRunTimePercent float64 `perfdata:"% Hypervisor Run Time"`
+ HypervisorRootVirtualProcessorTotalRunTimePercent float64 `perfdata:"% Total Run Time"`
+ HypervisorRootVirtualProcessorRemoteRunTimePercent float64 `perfdata:"% Remote Run Time"`
+ HypervisorRootVirtualProcessorCPUWaitTimePerDispatch float64 `perfdata:"CPU Wait Time Per Dispatch"`
+}
func (c *Collector) buildHypervisorRootVirtualProcessor() error {
var err error
- c.perfDataCollectorHypervisorRootVirtualProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Root Virtual Processor", perfdata.InstancesAll, []string{
- hypervisorRootVirtualProcessorGuestRunTimePercent,
- hypervisorRootVirtualProcessorHypervisorRunTimePercent,
- hypervisorRootVirtualProcessorTotalRunTimePercent,
- hypervisorRootVirtualProcessorRemoteRunTimePercent,
- hypervisorRootVirtualProcessorCPUWaitTimePerDispatch,
- })
+ c.perfDataCollectorHypervisorRootVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootVirtualProcessor]("Hyper-V Hypervisor Root Virtual Processor", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Root Virtual Processor collector: %w", err)
}
@@ -84,53 +81,53 @@ func (c *Collector) buildHypervisorRootVirtualProcessor() error {
}
func (c *Collector) collectHypervisorRootVirtualProcessor(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorHypervisorRootVirtualProcessor.Collect()
+ err := c.perfDataCollectorHypervisorRootVirtualProcessor.Collect(&c.perfDataObjectHypervisorRootVirtualProcessor)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Hypervisor Root Virtual Processor metrics: %w", err)
}
- for coreName, coreData := range data {
+ for _, data := range c.perfDataObjectHypervisorRootVirtualProcessor {
  // The name format is Hv LP <core id>
- parts := strings.Split(coreName, " ")
+ parts := strings.Split(data.Name, " ")
if len(parts) != 3 {
- return fmt.Errorf("unexpected Hyper-V Hypervisor Root Virtual Processor name format: %s", coreName)
+ return fmt.Errorf("unexpected Hyper-V Hypervisor Root Virtual Processor name format: %s", data.Name)
}
- coreId := parts[2]
+ coreID := parts[2]
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootVirtualProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorRootVirtualProcessorGuestRunTimePercent].FirstValue,
- coreId, "guest_run",
+ data.HypervisorRootVirtualProcessorGuestRunTimePercent,
+ coreID, "guest_run",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootVirtualProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorRootVirtualProcessorHypervisorRunTimePercent].FirstValue,
- coreId, "hypervisor",
+ data.HypervisorRootVirtualProcessorHypervisorRunTimePercent,
+ coreID, "hypervisor",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootVirtualProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorRootVirtualProcessorRemoteRunTimePercent].FirstValue,
- coreId, "remote",
+ data.HypervisorRootVirtualProcessorRemoteRunTimePercent,
+ coreID, "remote",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootVirtualProcessorTotalRunTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorRootVirtualProcessorTotalRunTimePercent].FirstValue,
- coreId,
+ data.HypervisorRootVirtualProcessorTotalRunTimePercent,
+ coreID,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootVirtualProcessorCPUWaitTimePerDispatch,
prometheus.CounterValue,
- coreData[hypervisorRootVirtualProcessorCPUWaitTimePerDispatch].FirstValue,
- coreId,
+ data.HypervisorRootVirtualProcessorCPUWaitTimePerDispatch,
+ coreID,
)
}
diff --git a/internal/collector/hyperv/hyperv_hypervisor_virtual_processor.go b/internal/collector/hyperv/hyperv_hypervisor_virtual_processor.go
index 383613fc6..48f9d2746 100644
--- a/internal/collector/hyperv/hyperv_hypervisor_virtual_processor.go
+++ b/internal/collector/hyperv/hyperv_hypervisor_virtual_processor.go
@@ -19,14 +19,15 @@ import (
"fmt"
"strings"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorHypervisorVirtualProcessor Hyper-V Hypervisor Virtual Processor metrics
type collectorHypervisorVirtualProcessor struct {
- perfDataCollectorHypervisorVirtualProcessor *perfdata.Collector
+ perfDataCollectorHypervisorVirtualProcessor *pdh.Collector
+ perfDataObjectHypervisorVirtualProcessor []perfDataCounterValuesHypervisorVirtualProcessor
// \Hyper-V Hypervisor Virtual Processor(*)\% Guest Run Time
// \Hyper-V Hypervisor Virtual Processor(*)\% Hypervisor Run Time
@@ -36,24 +37,20 @@ type collectorHypervisorVirtualProcessor struct {
hypervisorVirtualProcessorContextSwitches *prometheus.Desc // \Hyper-V Hypervisor Virtual Processor(*)\CPU Wait Time Per Dispatch
}
-const (
- hypervisorVirtualProcessorGuestIdleTimePercent = "% Guest Idle Time"
- hypervisorVirtualProcessorHypervisorRunTimePercent = "% Hypervisor Run Time"
- hypervisorVirtualProcessorTotalRunTimePercent = "% Total Run Time"
- hypervisorVirtualProcessorRemoteRunTimePercent = "% Remote Run Time"
- hypervisorVirtualProcessorCPUWaitTimePerDispatch = "CPU Wait Time Per Dispatch"
-)
+type perfDataCounterValuesHypervisorVirtualProcessor struct {
+ Name string
+
+ HypervisorVirtualProcessorGuestIdleTimePercent float64 `perfdata:"% Guest Idle Time"`
+ HypervisorVirtualProcessorHypervisorRunTimePercent float64 `perfdata:"% Hypervisor Run Time"`
+ HypervisorVirtualProcessorTotalRunTimePercent float64 `perfdata:"% Total Run Time"`
+ HypervisorVirtualProcessorRemoteRunTimePercent float64 `perfdata:"% Remote Run Time"`
+ HypervisorVirtualProcessorCPUWaitTimePerDispatch float64 `perfdata:"CPU Wait Time Per Dispatch"`
+}
func (c *Collector) buildHypervisorVirtualProcessor() error {
var err error
- c.perfDataCollectorHypervisorVirtualProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Virtual Processor", perfdata.InstancesAll, []string{
- hypervisorVirtualProcessorGuestIdleTimePercent,
- hypervisorVirtualProcessorHypervisorRunTimePercent,
- hypervisorVirtualProcessorTotalRunTimePercent,
- hypervisorVirtualProcessorRemoteRunTimePercent,
- hypervisorVirtualProcessorCPUWaitTimePerDispatch,
- })
+ c.perfDataCollectorHypervisorVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorVirtualProcessor]("Hyper-V Hypervisor Virtual Processor", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Virtual Processor collector: %w", err)
}
@@ -81,16 +78,16 @@ func (c *Collector) buildHypervisorVirtualProcessor() error {
}
func (c *Collector) collectHypervisorVirtualProcessor(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorHypervisorVirtualProcessor.Collect()
+ err := c.perfDataCollectorHypervisorVirtualProcessor.Collect(&c.perfDataObjectHypervisorVirtualProcessor)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Hypervisor Virtual Processor metrics: %w", err)
}
- for coreName, coreData := range data {
+ for _, data := range c.perfDataObjectHypervisorVirtualProcessor {
  // The name format is <vmname>:Hv VP <core id>
- parts := strings.Split(coreName, ":")
+ parts := strings.Split(data.Name, ":")
if len(parts) != 2 {
- return fmt.Errorf("unexpected format of Name in Hyper-V Hypervisor Virtual Processor: %q, expected %q", coreName, ":Hv VP ")
+ return fmt.Errorf("unexpected format of Name in Hyper-V Hypervisor Virtual Processor: %q, expected %q", data.Name, ":Hv VP ")
}
coreParts := strings.Split(parts[1], " ")
@@ -99,41 +96,41 @@ func (c *Collector) collectHypervisorVirtualProcessor(ch chan<- prometheus.Metri
}
vmName := parts[0]
- coreId := coreParts[2]
+ coreID := coreParts[2]
ch <- prometheus.MustNewConstMetric(
c.hypervisorVirtualProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorVirtualProcessorHypervisorRunTimePercent].FirstValue,
- vmName, coreId, "hypervisor",
+ data.HypervisorVirtualProcessorHypervisorRunTimePercent,
+ vmName, coreID, "hypervisor",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorVirtualProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorVirtualProcessorGuestIdleTimePercent].FirstValue,
- vmName, coreId, "guest_idle",
+ data.HypervisorVirtualProcessorGuestIdleTimePercent,
+ vmName, coreID, "guest_idle",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorVirtualProcessorTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorVirtualProcessorGuestIdleTimePercent].FirstValue,
- vmName, coreId, "guest_idle",
+ data.HypervisorVirtualProcessorRemoteRunTimePercent,
+ vmName, coreID, "remote",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorVirtualProcessorTotalRunTimeTotal,
prometheus.CounterValue,
- coreData[hypervisorVirtualProcessorTotalRunTimePercent].FirstValue,
- vmName, coreId,
+ data.HypervisorVirtualProcessorTotalRunTimePercent,
+ vmName, coreID,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorVirtualProcessorContextSwitches,
prometheus.CounterValue,
- coreData[hypervisorVirtualProcessorCPUWaitTimePerDispatch].FirstValue,
- vmName, coreId,
+ data.HypervisorVirtualProcessorCPUWaitTimePerDispatch,
+ vmName, coreID,
)
}
diff --git a/internal/collector/hyperv/hyperv_legacy_network_adapter.go b/internal/collector/hyperv/hyperv_legacy_network_adapter.go
index 63b4147f1..6d8422f58 100644
--- a/internal/collector/hyperv/hyperv_legacy_network_adapter.go
+++ b/internal/collector/hyperv/hyperv_legacy_network_adapter.go
@@ -18,14 +18,15 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorLegacyNetworkAdapter Hyper-V Legacy Network Adapter metrics
type collectorLegacyNetworkAdapter struct {
- perfDataCollectorLegacyNetworkAdapter *perfdata.Collector
+ perfDataCollectorLegacyNetworkAdapter *pdh.Collector
+ perfDataObjectLegacyNetworkAdapter []perfDataCounterValuesLegacyNetworkAdapter
legacyNetworkAdapterBytesDropped *prometheus.Desc // \Hyper-V Legacy Network Adapter(*)\Bytes Dropped
legacyNetworkAdapterBytesReceived *prometheus.Desc // \Hyper-V Legacy Network Adapter(*)\Bytes Received/sec
@@ -35,26 +36,21 @@ type collectorLegacyNetworkAdapter struct {
legacyNetworkAdapterFramesSent *prometheus.Desc // \Hyper-V Legacy Network Adapter(*)\Frames Sent/sec
}
-const (
- legacyNetworkAdapterBytesDropped = "Bytes Dropped"
- legacyNetworkAdapterBytesReceived = "Bytes Received/sec"
- legacyNetworkAdapterBytesSent = "Bytes Sent/sec"
- legacyNetworkAdapterFramesDropped = "Frames Dropped"
- legacyNetworkAdapterFramesReceived = "Frames Received/sec"
- legacyNetworkAdapterFramesSent = "Frames Sent/sec"
-)
+type perfDataCounterValuesLegacyNetworkAdapter struct {
+ Name string
+
+ LegacyNetworkAdapterBytesDropped float64 `perfdata:"Bytes Dropped"`
+ LegacyNetworkAdapterBytesReceived float64 `perfdata:"Bytes Received/sec"`
+ LegacyNetworkAdapterBytesSent float64 `perfdata:"Bytes Sent/sec"`
+ LegacyNetworkAdapterFramesDropped float64 `perfdata:"Frames Dropped"`
+ LegacyNetworkAdapterFramesReceived float64 `perfdata:"Frames Received/sec"`
+ LegacyNetworkAdapterFramesSent float64 `perfdata:"Frames Sent/sec"`
+}
func (c *Collector) buildLegacyNetworkAdapter() error {
var err error
- c.perfDataCollectorLegacyNetworkAdapter, err = perfdata.NewCollector("Hyper-V Legacy Network Adapter", perfdata.InstancesAll, []string{
- legacyNetworkAdapterBytesDropped,
- legacyNetworkAdapterBytesReceived,
- legacyNetworkAdapterBytesSent,
- legacyNetworkAdapterFramesDropped,
- legacyNetworkAdapterFramesReceived,
- legacyNetworkAdapterFramesSent,
- })
+ c.perfDataCollectorLegacyNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesLegacyNetworkAdapter]("Hyper-V Legacy Network Adapter", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Legacy Network Adapter collector: %w", err)
}
@@ -100,52 +96,52 @@ func (c *Collector) buildLegacyNetworkAdapter() error {
}
func (c *Collector) collectLegacyNetworkAdapter(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorLegacyNetworkAdapter.Collect()
+ err := c.perfDataCollectorLegacyNetworkAdapter.Collect(&c.perfDataObjectLegacyNetworkAdapter)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Legacy Network Adapter metrics: %w", err)
}
- for name, adapter := range data {
+ for _, data := range c.perfDataObjectLegacyNetworkAdapter {
ch <- prometheus.MustNewConstMetric(
c.legacyNetworkAdapterBytesDropped,
prometheus.GaugeValue,
- adapter[legacyNetworkAdapterBytesDropped].FirstValue,
- name,
+ data.LegacyNetworkAdapterBytesDropped,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.legacyNetworkAdapterBytesReceived,
prometheus.CounterValue,
- adapter[legacyNetworkAdapterBytesReceived].FirstValue,
- name,
+ data.LegacyNetworkAdapterBytesReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.legacyNetworkAdapterBytesSent,
prometheus.CounterValue,
- adapter[legacyNetworkAdapterBytesSent].FirstValue,
- name,
+ data.LegacyNetworkAdapterBytesSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.legacyNetworkAdapterFramesReceived,
prometheus.CounterValue,
- adapter[legacyNetworkAdapterFramesReceived].FirstValue,
- name,
+ data.LegacyNetworkAdapterFramesReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.legacyNetworkAdapterFramesDropped,
prometheus.CounterValue,
- adapter[legacyNetworkAdapterFramesDropped].FirstValue,
- name,
+ data.LegacyNetworkAdapterFramesDropped,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.legacyNetworkAdapterFramesSent,
prometheus.CounterValue,
- adapter[legacyNetworkAdapterFramesSent].FirstValue,
- name,
+ data.LegacyNetworkAdapterFramesSent,
+ data.Name,
)
}
diff --git a/internal/collector/hyperv/hyperv_virtual_machine_health_summary.go b/internal/collector/hyperv/hyperv_virtual_machine_health_summary.go
index 4d08f6b25..d9525e6bb 100644
--- a/internal/collector/hyperv/hyperv_virtual_machine_health_summary.go
+++ b/internal/collector/hyperv/hyperv_virtual_machine_health_summary.go
@@ -16,36 +16,33 @@
package hyperv
import (
- "errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorVirtualMachineHealthSummary Hyper-V Virtual Machine Health Summary metrics
type collectorVirtualMachineHealthSummary struct {
- perfDataCollectorVirtualMachineHealthSummary *perfdata.Collector
+ perfDataCollectorVirtualMachineHealthSummary *pdh.Collector
+ perfDataObjectVirtualMachineHealthSummary []perfDataCounterValuesVirtualMachineHealthSummary
// \Hyper-V Virtual Machine Health Summary\Health Critical
// \Hyper-V Virtual Machine Health Summary\Health Ok
health *prometheus.Desc
}
-const (
+type perfDataCounterValuesVirtualMachineHealthSummary struct {
// Hyper-V Virtual Machine Health Summary
- healthCritical = "Health Critical"
- healthOk = "Health Ok"
-)
+ HealthCritical float64 `perfdata:"Health Critical"`
+ HealthOk float64 `perfdata:"Health Ok"`
+}
func (c *Collector) buildVirtualMachineHealthSummary() error {
var err error
- c.perfDataCollectorVirtualMachineHealthSummary, err = perfdata.NewCollector("Hyper-V Virtual Machine Health Summary", nil, []string{
- healthCritical,
- healthOk,
- })
+ c.perfDataCollectorVirtualMachineHealthSummary, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineHealthSummary]("Hyper-V Virtual Machine Health Summary", nil)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Machine Health Summary collector: %w", err)
}
@@ -61,27 +58,22 @@ func (c *Collector) buildVirtualMachineHealthSummary() error {
}
func (c *Collector) collectVirtualMachineHealthSummary(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorVirtualMachineHealthSummary.Collect()
+ err := c.perfDataCollectorVirtualMachineHealthSummary.Collect(&c.perfDataObjectVirtualMachineHealthSummary)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Virtual Machine Health Summary metrics: %w", err)
}
- healthData, ok := data[perfdata.InstanceEmpty]
- if !ok {
- return errors.New("no data returned for Hyper-V Virtual Machine Health Summary")
- }
-
ch <- prometheus.MustNewConstMetric(
c.health,
prometheus.GaugeValue,
- healthData[healthCritical].FirstValue,
+ c.perfDataObjectVirtualMachineHealthSummary[0].HealthCritical,
"critical",
)
ch <- prometheus.MustNewConstMetric(
c.health,
prometheus.GaugeValue,
- healthData[healthOk].FirstValue,
+ c.perfDataObjectVirtualMachineHealthSummary[0].HealthOk,
"ok",
)
diff --git a/internal/collector/hyperv/hyperv_virtual_machine_vid_partition.go b/internal/collector/hyperv/hyperv_virtual_machine_vid_partition.go
index a0236df64..1db84e7d3 100644
--- a/internal/collector/hyperv/hyperv_virtual_machine_vid_partition.go
+++ b/internal/collector/hyperv/hyperv_virtual_machine_vid_partition.go
@@ -18,33 +18,33 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorVirtualMachineVidPartition Hyper-V VM Vid Partition metrics
type collectorVirtualMachineVidPartition struct {
- perfDataCollectorVirtualMachineVidPartition *perfdata.Collector
- physicalPagesAllocated *prometheus.Desc // \Hyper-V VM Vid Partition(*)\Physical Pages Allocated
- preferredNUMANodeIndex *prometheus.Desc // \Hyper-V VM Vid Partition(*)\Preferred NUMA Node Index
- remotePhysicalPages *prometheus.Desc // \Hyper-V VM Vid Partition(*)\Remote Physical Pages
+ perfDataCollectorVirtualMachineVidPartition *pdh.Collector
+ perfDataObjectVirtualMachineVidPartition []perfDataCounterValuesVirtualMachineVidPartition
+
+ physicalPagesAllocated *prometheus.Desc // \Hyper-V VM Vid Partition(*)\Physical Pages Allocated
+ preferredNUMANodeIndex *prometheus.Desc // \Hyper-V VM Vid Partition(*)\Preferred NUMA Node Index
+ remotePhysicalPages *prometheus.Desc // \Hyper-V VM Vid Partition(*)\Remote Physical Pages
}
-const (
- physicalPagesAllocated = "Physical Pages Allocated"
- preferredNUMANodeIndex = "Preferred NUMA Node Index"
- remotePhysicalPages = "Remote Physical Pages"
-)
+type perfDataCounterValuesVirtualMachineVidPartition struct {
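+ // Name receives the PDH instance name (the per-VM instance) and is emitted as the metric label.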
+ Name string
+
+ PhysicalPagesAllocated float64 `perfdata:"Physical Pages Allocated"`
+ PreferredNUMANodeIndex float64 `perfdata:"Preferred NUMA Node Index"`
+ RemotePhysicalPages float64 `perfdata:"Remote Physical Pages"`
+}
func (c *Collector) buildVirtualMachineVidPartition() error {
var err error
- c.perfDataCollectorVirtualMachineVidPartition, err = perfdata.NewCollector("Hyper-V VM Vid Partition", perfdata.InstancesAll, []string{
- physicalPagesAllocated,
- preferredNUMANodeIndex,
- remotePhysicalPages,
- })
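+ // pdh.InstancesAll enumerates every instance of the object, yielding one slice element per instance.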
+ c.perfDataCollectorVirtualMachineVidPartition, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineVidPartition]("Hyper-V VM Vid Partition", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V VM Vid Partition collector: %w", err)
}
@@ -72,31 +72,31 @@ func (c *Collector) buildVirtualMachineVidPartition() error {
}
func (c *Collector) collectVirtualMachineVidPartition(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorVirtualMachineVidPartition.Collect()
+ err := c.perfDataCollectorVirtualMachineVidPartition.Collect(&c.perfDataObjectVirtualMachineVidPartition)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V VM Vid Partition metrics: %w", err)
}
- for name, page := range data {
+ for _, data := range c.perfDataObjectVirtualMachineVidPartition {
ch <- prometheus.MustNewConstMetric(
c.physicalPagesAllocated,
prometheus.GaugeValue,
- page[physicalPagesAllocated].FirstValue,
- name,
+ data.PhysicalPagesAllocated,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.preferredNUMANodeIndex,
prometheus.GaugeValue,
- page[preferredNUMANodeIndex].FirstValue,
- name,
+ data.PreferredNUMANodeIndex,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.remotePhysicalPages,
prometheus.GaugeValue,
- page[remotePhysicalPages].FirstValue,
- name,
+ data.RemotePhysicalPages,
+ data.Name,
)
}
diff --git a/internal/collector/hyperv/hyperv_virtual_network_adapter.go b/internal/collector/hyperv/hyperv_virtual_network_adapter.go
index 1287abd92..c624c57aa 100644
--- a/internal/collector/hyperv/hyperv_virtual_network_adapter.go
+++ b/internal/collector/hyperv/hyperv_virtual_network_adapter.go
@@ -18,14 +18,15 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorVirtualNetworkAdapter Hyper-V Virtual Network Adapter metrics
type collectorVirtualNetworkAdapter struct {
- perfDataCollectorVirtualNetworkAdapter *perfdata.Collector
+ perfDataCollectorVirtualNetworkAdapter *pdh.Collector
+ perfDataObjectVirtualNetworkAdapter []perfDataCounterValuesVirtualNetworkAdapter
virtualNetworkAdapterBytesReceived *prometheus.Desc // \Hyper-V Virtual Network Adapter(*)\Bytes Received/sec
virtualNetworkAdapterBytesSent *prometheus.Desc // \Hyper-V Virtual Network Adapter(*)\Bytes Sent/sec
@@ -35,26 +36,21 @@ type collectorVirtualNetworkAdapter struct {
virtualNetworkAdapterPacketsSent *prometheus.Desc // \Hyper-V Virtual Network Adapter(*)\Packets Sent/sec
}
-const (
- virtualNetworkAdapterBytesReceived = "Bytes Received/sec"
- virtualNetworkAdapterBytesSent = "Bytes Sent/sec"
- virtualNetworkAdapterDroppedPacketsIncoming = "Dropped Packets Incoming/sec"
- virtualNetworkAdapterDroppedPacketsOutgoing = "Dropped Packets Outgoing/sec"
- virtualNetworkAdapterPacketsReceived = "Packets Received/sec"
- virtualNetworkAdapterPacketsSent = "Packets Sent/sec"
-)
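+// perfDataCounterValuesVirtualNetworkAdapter holds one row of counters per Hyper-V virtual network adapter instance.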
+type perfDataCounterValuesVirtualNetworkAdapter struct {
+ Name string
+
+ VirtualNetworkAdapterBytesReceived float64 `perfdata:"Bytes Received/sec"`
+ VirtualNetworkAdapterBytesSent float64 `perfdata:"Bytes Sent/sec"`
+ VirtualNetworkAdapterDroppedPacketsIncoming float64 `perfdata:"Dropped Packets Incoming/sec"`
+ VirtualNetworkAdapterDroppedPacketsOutgoing float64 `perfdata:"Dropped Packets Outgoing/sec"`
+ VirtualNetworkAdapterPacketsReceived float64 `perfdata:"Packets Received/sec"`
+ VirtualNetworkAdapterPacketsSent float64 `perfdata:"Packets Sent/sec"`
+}
func (c *Collector) buildVirtualNetworkAdapter() error {
var err error
- c.perfDataCollectorVirtualNetworkAdapter, err = perfdata.NewCollector("Hyper-V Virtual Network Adapter", perfdata.InstancesAll, []string{
- virtualNetworkAdapterBytesReceived,
- virtualNetworkAdapterBytesSent,
- virtualNetworkAdapterDroppedPacketsIncoming,
- virtualNetworkAdapterDroppedPacketsOutgoing,
- virtualNetworkAdapterPacketsReceived,
- virtualNetworkAdapterPacketsSent,
- })
+ c.perfDataCollectorVirtualNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapter]("Hyper-V Virtual Network Adapter", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter collector: %w", err)
}
@@ -100,52 +96,52 @@ func (c *Collector) buildVirtualNetworkAdapter() error {
}
func (c *Collector) collectVirtualNetworkAdapter(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorVirtualNetworkAdapter.Collect()
+ err := c.perfDataCollectorVirtualNetworkAdapter.Collect(&c.perfDataObjectVirtualNetworkAdapter)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Virtual Network Adapter metrics: %w", err)
}
- for name, adapterData := range data {
+ for _, data := range c.perfDataObjectVirtualNetworkAdapter {
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterBytesReceived,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterBytesReceived].FirstValue,
- name,
+ data.VirtualNetworkAdapterBytesReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterBytesSent,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterBytesSent].FirstValue,
- name,
+ data.VirtualNetworkAdapterBytesSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDroppedPacketsIncoming,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDroppedPacketsIncoming].FirstValue,
- name,
+ data.VirtualNetworkAdapterDroppedPacketsIncoming,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDroppedPacketsOutgoing,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDroppedPacketsOutgoing].FirstValue,
- name,
+ data.VirtualNetworkAdapterDroppedPacketsOutgoing,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterPacketsReceived,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterPacketsReceived].FirstValue,
- name,
+ data.VirtualNetworkAdapterPacketsReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterPacketsSent,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterPacketsSent].FirstValue,
- name,
+ data.VirtualNetworkAdapterPacketsSent,
+ data.Name,
)
}
diff --git a/internal/collector/hyperv/hyperv_virtual_network_adapter_drop_reasons.go b/internal/collector/hyperv/hyperv_virtual_network_adapter_drop_reasons.go
index 017f7dcd7..0e177719f 100644
--- a/internal/collector/hyperv/hyperv_virtual_network_adapter_drop_reasons.go
+++ b/internal/collector/hyperv/hyperv_virtual_network_adapter_drop_reasons.go
@@ -18,14 +18,15 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorVirtualNetworkAdapterDropReasons Hyper-V Virtual Network Adapter Drop Reasons metrics
type collectorVirtualNetworkAdapterDropReasons struct {
- perfDataCollectorVirtualNetworkAdapterDropReasons *perfdata.Collector
+ perfDataCollectorVirtualNetworkAdapterDropReasons *pdh.Collector
+ perfDataObjectVirtualNetworkAdapterDropReasons []perfDataCounterValuesVirtualNetworkAdapterDropReasons
// \Hyper-V Virtual Network Adapter Drop Reasons(*)\Outgoing LowPowerPacketFilter
// \Hyper-V Virtual Network Adapter Drop Reasons(*)\Incoming LowPowerPacketFilter
@@ -106,114 +107,65 @@ type collectorVirtualNetworkAdapterDropReasons struct {
virtualNetworkAdapterDropReasons *prometheus.Desc
}
-const (
- virtualNetworkAdapterDropReasonsOutgoingNativeFwdingReq = "Outgoing NativeFwdingReq"
- virtualNetworkAdapterDropReasonsIncomingNativeFwdingReq = "Incoming NativeFwdingReq"
- virtualNetworkAdapterDropReasonsOutgoingMTUMismatch = "Outgoing MTUMismatch"
- virtualNetworkAdapterDropReasonsIncomingMTUMismatch = "Incoming MTUMismatch"
- virtualNetworkAdapterDropReasonsOutgoingInvalidConfig = "Outgoing InvalidConfig"
- virtualNetworkAdapterDropReasonsIncomingInvalidConfig = "Incoming InvalidConfig"
- virtualNetworkAdapterDropReasonsOutgoingRequiredExtensionMissing = "Outgoing RequiredExtensionMissing"
- virtualNetworkAdapterDropReasonsIncomingRequiredExtensionMissing = "Incoming RequiredExtensionMissing"
- virtualNetworkAdapterDropReasonsOutgoingVirtualSubnetId = "Outgoing VirtualSubnetId"
- virtualNetworkAdapterDropReasonsIncomingVirtualSubnetId = "Incoming VirtualSubnetId"
- virtualNetworkAdapterDropReasonsOutgoingBridgeReserved = "Outgoing BridgeReserved"
- virtualNetworkAdapterDropReasonsIncomingBridgeReserved = "Incoming BridgeReserved"
- virtualNetworkAdapterDropReasonsOutgoingRouterGuard = "Outgoing RouterGuard"
- virtualNetworkAdapterDropReasonsIncomingRouterGuard = "Incoming RouterGuard"
- virtualNetworkAdapterDropReasonsOutgoingDhcpGuard = "Outgoing DhcpGuard"
- virtualNetworkAdapterDropReasonsIncomingDhcpGuard = "Incoming DhcpGuard"
- virtualNetworkAdapterDropReasonsOutgoingMacSpoofing = "Outgoing MacSpoofing"
- virtualNetworkAdapterDropReasonsIncomingMacSpoofing = "Incoming MacSpoofing"
- virtualNetworkAdapterDropReasonsOutgoingIpsec = "Outgoing Ipsec"
- virtualNetworkAdapterDropReasonsIncomingIpsec = "Incoming Ipsec"
- virtualNetworkAdapterDropReasonsOutgoingQos = "Outgoing Qos"
- virtualNetworkAdapterDropReasonsIncomingQos = "Incoming Qos"
- virtualNetworkAdapterDropReasonsOutgoingFailedPvlanSetting = "Outgoing FailedPvlanSetting"
- virtualNetworkAdapterDropReasonsIncomingFailedPvlanSetting = "Incoming FailedPvlanSetting"
- virtualNetworkAdapterDropReasonsOutgoingFailedSecurityPolicy = "Outgoing FailedSecurityPolicy"
- virtualNetworkAdapterDropReasonsIncomingFailedSecurityPolicy = "Incoming FailedSecurityPolicy"
- virtualNetworkAdapterDropReasonsOutgoingUnauthorizedMAC = "Outgoing UnauthorizedMAC"
- virtualNetworkAdapterDropReasonsIncomingUnauthorizedMAC = "Incoming UnauthorizedMAC"
- virtualNetworkAdapterDropReasonsOutgoingUnauthorizedVLAN = "Outgoing UnauthorizedVLAN"
- virtualNetworkAdapterDropReasonsIncomingUnauthorizedVLAN = "Incoming UnauthorizedVLAN"
- virtualNetworkAdapterDropReasonsOutgoingFilteredVLAN = "Outgoing FilteredVLAN"
- virtualNetworkAdapterDropReasonsIncomingFilteredVLAN = "Incoming FilteredVLAN"
- virtualNetworkAdapterDropReasonsOutgoingFiltered = "Outgoing Filtered"
- virtualNetworkAdapterDropReasonsIncomingFiltered = "Incoming Filtered"
- virtualNetworkAdapterDropReasonsOutgoingBusy = "Outgoing Busy"
- virtualNetworkAdapterDropReasonsIncomingBusy = "Incoming Busy"
- virtualNetworkAdapterDropReasonsOutgoingNotAccepted = "Outgoing NotAccepted"
- virtualNetworkAdapterDropReasonsIncomingNotAccepted = "Incoming NotAccepted"
- virtualNetworkAdapterDropReasonsOutgoingDisconnected = "Outgoing Disconnected"
- virtualNetworkAdapterDropReasonsIncomingDisconnected = "Incoming Disconnected"
- virtualNetworkAdapterDropReasonsOutgoingNotReady = "Outgoing NotReady"
- virtualNetworkAdapterDropReasonsIncomingNotReady = "Incoming NotReady"
- virtualNetworkAdapterDropReasonsOutgoingResources = "Outgoing Resources"
- virtualNetworkAdapterDropReasonsIncomingResources = "Incoming Resources"
- virtualNetworkAdapterDropReasonsOutgoingInvalidPacket = "Outgoing InvalidPacket"
- virtualNetworkAdapterDropReasonsIncomingInvalidPacket = "Incoming InvalidPacket"
- virtualNetworkAdapterDropReasonsOutgoingInvalidData = "Outgoing InvalidData"
- virtualNetworkAdapterDropReasonsIncomingInvalidData = "Incoming InvalidData"
- virtualNetworkAdapterDropReasonsOutgoingUnknown = "Outgoing Unknown"
- virtualNetworkAdapterDropReasonsIncomingUnknown = "Incoming Unknown"
-)
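+// perfDataCounterValuesVirtualNetworkAdapterDropReasons mirrors the per-adapter drop counters; each Outgoing/Incoming pair is exported through the single virtualNetworkAdapterDropReasons descriptor with the drop reason and traffic direction as labels.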
+type perfDataCounterValuesVirtualNetworkAdapterDropReasons struct {
+ Name string
+
+ VirtualNetworkAdapterDropReasonsOutgoingNativeFwdingReq float64 `perfdata:"Outgoing NativeFwdingReq"`
+ VirtualNetworkAdapterDropReasonsIncomingNativeFwdingReq float64 `perfdata:"Incoming NativeFwdingReq"`
+ VirtualNetworkAdapterDropReasonsOutgoingMTUMismatch float64 `perfdata:"Outgoing MTUMismatch"`
+ VirtualNetworkAdapterDropReasonsIncomingMTUMismatch float64 `perfdata:"Incoming MTUMismatch"`
+ VirtualNetworkAdapterDropReasonsOutgoingInvalidConfig float64 `perfdata:"Outgoing InvalidConfig"`
+ VirtualNetworkAdapterDropReasonsIncomingInvalidConfig float64 `perfdata:"Incoming InvalidConfig"`
+ VirtualNetworkAdapterDropReasonsOutgoingRequiredExtensionMissing float64 `perfdata:"Outgoing RequiredExtensionMissing"`
+ VirtualNetworkAdapterDropReasonsIncomingRequiredExtensionMissing float64 `perfdata:"Incoming RequiredExtensionMissing"`
+ VirtualNetworkAdapterDropReasonsOutgoingVirtualSubnetId float64 `perfdata:"Outgoing VirtualSubnetId"`
+ VirtualNetworkAdapterDropReasonsIncomingVirtualSubnetId float64 `perfdata:"Incoming VirtualSubnetId"`
+ VirtualNetworkAdapterDropReasonsOutgoingBridgeReserved float64 `perfdata:"Outgoing BridgeReserved"`
+ VirtualNetworkAdapterDropReasonsIncomingBridgeReserved float64 `perfdata:"Incoming BridgeReserved"`
+ VirtualNetworkAdapterDropReasonsOutgoingRouterGuard float64 `perfdata:"Outgoing RouterGuard"`
+ VirtualNetworkAdapterDropReasonsIncomingRouterGuard float64 `perfdata:"Incoming RouterGuard"`
+ VirtualNetworkAdapterDropReasonsOutgoingDhcpGuard float64 `perfdata:"Outgoing DhcpGuard"`
+ VirtualNetworkAdapterDropReasonsIncomingDhcpGuard float64 `perfdata:"Incoming DhcpGuard"`
+ VirtualNetworkAdapterDropReasonsOutgoingMacSpoofing float64 `perfdata:"Outgoing MacSpoofing"`
+ VirtualNetworkAdapterDropReasonsIncomingMacSpoofing float64 `perfdata:"Incoming MacSpoofing"`
+ VirtualNetworkAdapterDropReasonsOutgoingIpsec float64 `perfdata:"Outgoing Ipsec"`
+ VirtualNetworkAdapterDropReasonsIncomingIpsec float64 `perfdata:"Incoming Ipsec"`
+ VirtualNetworkAdapterDropReasonsOutgoingQos float64 `perfdata:"Outgoing Qos"`
+ VirtualNetworkAdapterDropReasonsIncomingQos float64 `perfdata:"Incoming Qos"`
+ VirtualNetworkAdapterDropReasonsOutgoingFailedPvlanSetting float64 `perfdata:"Outgoing FailedPvlanSetting"`
+ VirtualNetworkAdapterDropReasonsIncomingFailedPvlanSetting float64 `perfdata:"Incoming FailedPvlanSetting"`
+ VirtualNetworkAdapterDropReasonsOutgoingFailedSecurityPolicy float64 `perfdata:"Outgoing FailedSecurityPolicy"`
+ VirtualNetworkAdapterDropReasonsIncomingFailedSecurityPolicy float64 `perfdata:"Incoming FailedSecurityPolicy"`
+ VirtualNetworkAdapterDropReasonsOutgoingUnauthorizedMAC float64 `perfdata:"Outgoing UnauthorizedMAC"`
+ VirtualNetworkAdapterDropReasonsIncomingUnauthorizedMAC float64 `perfdata:"Incoming UnauthorizedMAC"`
+ VirtualNetworkAdapterDropReasonsOutgoingUnauthorizedVLAN float64 `perfdata:"Outgoing UnauthorizedVLAN"`
+ VirtualNetworkAdapterDropReasonsIncomingUnauthorizedVLAN float64 `perfdata:"Incoming UnauthorizedVLAN"`
+ VirtualNetworkAdapterDropReasonsOutgoingFilteredVLAN float64 `perfdata:"Outgoing FilteredVLAN"`
+ VirtualNetworkAdapterDropReasonsIncomingFilteredVLAN float64 `perfdata:"Incoming FilteredVLAN"`
+ VirtualNetworkAdapterDropReasonsOutgoingFiltered float64 `perfdata:"Outgoing Filtered"`
+ VirtualNetworkAdapterDropReasonsIncomingFiltered float64 `perfdata:"Incoming Filtered"`
+ VirtualNetworkAdapterDropReasonsOutgoingBusy float64 `perfdata:"Outgoing Busy"`
+ VirtualNetworkAdapterDropReasonsIncomingBusy float64 `perfdata:"Incoming Busy"`
+ VirtualNetworkAdapterDropReasonsOutgoingNotAccepted float64 `perfdata:"Outgoing NotAccepted"`
+ VirtualNetworkAdapterDropReasonsIncomingNotAccepted float64 `perfdata:"Incoming NotAccepted"`
+ VirtualNetworkAdapterDropReasonsOutgoingDisconnected float64 `perfdata:"Outgoing Disconnected"`
+ VirtualNetworkAdapterDropReasonsIncomingDisconnected float64 `perfdata:"Incoming Disconnected"`
+ VirtualNetworkAdapterDropReasonsOutgoingNotReady float64 `perfdata:"Outgoing NotReady"`
+ VirtualNetworkAdapterDropReasonsIncomingNotReady float64 `perfdata:"Incoming NotReady"`
+ VirtualNetworkAdapterDropReasonsOutgoingResources float64 `perfdata:"Outgoing Resources"`
+ VirtualNetworkAdapterDropReasonsIncomingResources float64 `perfdata:"Incoming Resources"`
+ VirtualNetworkAdapterDropReasonsOutgoingInvalidPacket float64 `perfdata:"Outgoing InvalidPacket"`
+ VirtualNetworkAdapterDropReasonsIncomingInvalidPacket float64 `perfdata:"Incoming InvalidPacket"`
+ VirtualNetworkAdapterDropReasonsOutgoingInvalidData float64 `perfdata:"Outgoing InvalidData"`
+ VirtualNetworkAdapterDropReasonsIncomingInvalidData float64 `perfdata:"Incoming InvalidData"`
+ VirtualNetworkAdapterDropReasonsOutgoingUnknown float64 `perfdata:"Outgoing Unknown"`
+ VirtualNetworkAdapterDropReasonsIncomingUnknown float64 `perfdata:"Incoming Unknown"`
+}
func (c *Collector) buildVirtualNetworkAdapterDropReasons() error {
var err error
- c.perfDataCollectorVirtualNetworkAdapterDropReasons, err = perfdata.NewCollector("Hyper-V Virtual Network Adapter Drop Reasons", perfdata.InstancesAll, []string{
- virtualNetworkAdapterDropReasonsOutgoingNativeFwdingReq,
- virtualNetworkAdapterDropReasonsIncomingNativeFwdingReq,
- virtualNetworkAdapterDropReasonsOutgoingMTUMismatch,
- virtualNetworkAdapterDropReasonsIncomingMTUMismatch,
- virtualNetworkAdapterDropReasonsOutgoingInvalidConfig,
- virtualNetworkAdapterDropReasonsIncomingInvalidConfig,
- virtualNetworkAdapterDropReasonsOutgoingRequiredExtensionMissing,
- virtualNetworkAdapterDropReasonsIncomingRequiredExtensionMissing,
- virtualNetworkAdapterDropReasonsOutgoingVirtualSubnetId,
- virtualNetworkAdapterDropReasonsIncomingVirtualSubnetId,
- virtualNetworkAdapterDropReasonsOutgoingBridgeReserved,
- virtualNetworkAdapterDropReasonsIncomingBridgeReserved,
- virtualNetworkAdapterDropReasonsOutgoingRouterGuard,
- virtualNetworkAdapterDropReasonsIncomingRouterGuard,
- virtualNetworkAdapterDropReasonsOutgoingDhcpGuard,
- virtualNetworkAdapterDropReasonsIncomingDhcpGuard,
- virtualNetworkAdapterDropReasonsOutgoingMacSpoofing,
- virtualNetworkAdapterDropReasonsIncomingMacSpoofing,
- virtualNetworkAdapterDropReasonsOutgoingIpsec,
- virtualNetworkAdapterDropReasonsIncomingIpsec,
- virtualNetworkAdapterDropReasonsOutgoingQos,
- virtualNetworkAdapterDropReasonsIncomingQos,
- virtualNetworkAdapterDropReasonsOutgoingFailedPvlanSetting,
- virtualNetworkAdapterDropReasonsIncomingFailedPvlanSetting,
- virtualNetworkAdapterDropReasonsOutgoingFailedSecurityPolicy,
- virtualNetworkAdapterDropReasonsIncomingFailedSecurityPolicy,
- virtualNetworkAdapterDropReasonsOutgoingUnauthorizedMAC,
- virtualNetworkAdapterDropReasonsIncomingUnauthorizedMAC,
- virtualNetworkAdapterDropReasonsOutgoingUnauthorizedVLAN,
- virtualNetworkAdapterDropReasonsIncomingUnauthorizedVLAN,
- virtualNetworkAdapterDropReasonsOutgoingFilteredVLAN,
- virtualNetworkAdapterDropReasonsIncomingFilteredVLAN,
- virtualNetworkAdapterDropReasonsOutgoingFiltered,
- virtualNetworkAdapterDropReasonsIncomingFiltered,
- virtualNetworkAdapterDropReasonsOutgoingBusy,
- virtualNetworkAdapterDropReasonsIncomingBusy,
- virtualNetworkAdapterDropReasonsOutgoingNotAccepted,
- virtualNetworkAdapterDropReasonsIncomingNotAccepted,
- virtualNetworkAdapterDropReasonsOutgoingDisconnected,
- virtualNetworkAdapterDropReasonsIncomingDisconnected,
- virtualNetworkAdapterDropReasonsOutgoingNotReady,
- virtualNetworkAdapterDropReasonsIncomingNotReady,
- virtualNetworkAdapterDropReasonsOutgoingResources,
- virtualNetworkAdapterDropReasonsIncomingResources,
- virtualNetworkAdapterDropReasonsOutgoingInvalidPacket,
- virtualNetworkAdapterDropReasonsIncomingInvalidPacket,
- virtualNetworkAdapterDropReasonsOutgoingInvalidData,
- virtualNetworkAdapterDropReasonsIncomingInvalidData,
- virtualNetworkAdapterDropReasonsOutgoingUnknown,
- virtualNetworkAdapterDropReasonsIncomingUnknown,
- })
+ c.perfDataCollectorVirtualNetworkAdapterDropReasons, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapterDropReasons]("Hyper-V Virtual Network Adapter Drop Reasons", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter Drop Reasons collector: %w", err)
}
@@ -229,311 +181,311 @@ func (c *Collector) buildVirtualNetworkAdapterDropReasons() error {
}
func (c *Collector) collectVirtualNetworkAdapterDropReasons(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorVirtualNetworkAdapterDropReasons.Collect()
+ err := c.perfDataCollectorVirtualNetworkAdapterDropReasons.Collect(&c.perfDataObjectVirtualNetworkAdapterDropReasons)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Virtual Network Adapter Drop Reasons metrics: %w", err)
}
- for name, adapterData := range data {
+ for _, data := range c.perfDataObjectVirtualNetworkAdapterDropReasons {
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingNativeFwdingReq].FirstValue,
- name, "NativeFwdingReq", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingNativeFwdingReq,
+ data.Name, "NativeFwdingReq", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingNativeFwdingReq].FirstValue,
- name, "NativeFwdingReq", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingNativeFwdingReq,
+ data.Name, "NativeFwdingReq", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingMTUMismatch].FirstValue,
- name, "MTUMismatch", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingMTUMismatch,
+ data.Name, "MTUMismatch", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingMTUMismatch].FirstValue,
- name, "MTUMismatch", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingMTUMismatch,
+ data.Name, "MTUMismatch", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingInvalidConfig].FirstValue,
- name, "InvalidConfig", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingInvalidConfig,
+ data.Name, "InvalidConfig", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingInvalidConfig].FirstValue,
- name, "InvalidConfig", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingInvalidConfig,
+ data.Name, "InvalidConfig", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingRequiredExtensionMissing].FirstValue,
- name, "RequiredExtensionMissing", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingRequiredExtensionMissing,
+ data.Name, "RequiredExtensionMissing", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingRequiredExtensionMissing].FirstValue,
- name, "RequiredExtensionMissing", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingRequiredExtensionMissing,
+ data.Name, "RequiredExtensionMissing", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingVirtualSubnetId].FirstValue,
- name, "VirtualSubnetId", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingVirtualSubnetId,
+ data.Name, "VirtualSubnetId", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingVirtualSubnetId].FirstValue,
- name, "VirtualSubnetId", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingVirtualSubnetId,
+ data.Name, "VirtualSubnetId", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingBridgeReserved].FirstValue,
- name, "BridgeReserved", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingBridgeReserved,
+ data.Name, "BridgeReserved", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingBridgeReserved].FirstValue,
- name, "BridgeReserved", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingBridgeReserved,
+ data.Name, "BridgeReserved", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingRouterGuard].FirstValue,
- name, "RouterGuard", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingRouterGuard,
+ data.Name, "RouterGuard", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingRouterGuard].FirstValue,
- name, "RouterGuard", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingRouterGuard,
+ data.Name, "RouterGuard", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingDhcpGuard].FirstValue,
- name, "DhcpGuard", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingDhcpGuard,
+ data.Name, "DhcpGuard", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingDhcpGuard].FirstValue,
- name, "DhcpGuard", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingDhcpGuard,
+ data.Name, "DhcpGuard", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingMacSpoofing].FirstValue,
- name, "MacSpoofing", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingMacSpoofing,
+ data.Name, "MacSpoofing", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingMacSpoofing].FirstValue,
- name, "MacSpoofing", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingMacSpoofing,
+ data.Name, "MacSpoofing", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingIpsec].FirstValue,
- name, "Ipsec", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingIpsec,
+ data.Name, "Ipsec", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingIpsec].FirstValue,
- name, "Ipsec", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingIpsec,
+ data.Name, "Ipsec", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingQos].FirstValue,
- name, "Qos", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingQos,
+ data.Name, "Qos", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingQos].FirstValue,
- name, "Qos", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingQos,
+ data.Name, "Qos", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingFailedPvlanSetting].FirstValue,
- name, "FailedPvlanSetting", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingFailedPvlanSetting,
+ data.Name, "FailedPvlanSetting", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingFailedPvlanSetting].FirstValue,
- name, "FailedPvlanSetting", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingFailedPvlanSetting,
+ data.Name, "FailedPvlanSetting", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingFailedSecurityPolicy].FirstValue,
- name, "FailedSecurityPolicy", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingFailedSecurityPolicy,
+ data.Name, "FailedSecurityPolicy", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingFailedSecurityPolicy].FirstValue,
- name, "FailedSecurityPolicy", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingFailedSecurityPolicy,
+ data.Name, "FailedSecurityPolicy", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingUnauthorizedMAC].FirstValue,
- name, "UnauthorizedMAC", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingUnauthorizedMAC,
+ data.Name, "UnauthorizedMAC", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingUnauthorizedMAC].FirstValue,
- name, "UnauthorizedMAC", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingUnauthorizedMAC,
+ data.Name, "UnauthorizedMAC", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingUnauthorizedVLAN].FirstValue,
- name, "UnauthorizedVLAN", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingUnauthorizedVLAN,
+ data.Name, "UnauthorizedVLAN", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingUnauthorizedVLAN].FirstValue,
- name, "UnauthorizedVLAN", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingUnauthorizedVLAN,
+ data.Name, "UnauthorizedVLAN", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingFilteredVLAN].FirstValue,
- name, "FilteredVLAN", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingFilteredVLAN,
+ data.Name, "FilteredVLAN", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingFilteredVLAN].FirstValue,
- name, "FilteredVLAN", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingFilteredVLAN,
+ data.Name, "FilteredVLAN", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingFiltered].FirstValue,
- name, "Filtered", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingFiltered,
+ data.Name, "Filtered", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingFiltered].FirstValue,
- name, "Filtered", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingFiltered,
+ data.Name, "Filtered", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingBusy].FirstValue,
- name, "Busy", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingBusy,
+ data.Name, "Busy", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingBusy].FirstValue,
- name, "Busy", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingBusy,
+ data.Name, "Busy", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingNotAccepted].FirstValue,
- name, "NotAccepted", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingNotAccepted,
+ data.Name, "NotAccepted", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingNotAccepted].FirstValue,
- name, "NotAccepted", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingNotAccepted,
+ data.Name, "NotAccepted", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingDisconnected].FirstValue,
- name, "Disconnected", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingDisconnected,
+ data.Name, "Disconnected", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingDisconnected].FirstValue,
- name, "Disconnected", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingDisconnected,
+ data.Name, "Disconnected", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingNotReady].FirstValue,
- name, "NotReady", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingNotReady,
+ data.Name, "NotReady", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingNotReady].FirstValue,
- name, "NotReady", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingNotReady,
+ data.Name, "NotReady", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingResources].FirstValue,
- name, "Resources", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingResources,
+ data.Name, "Resources", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingResources].FirstValue,
- name, "Resources", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingResources,
+ data.Name, "Resources", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingInvalidPacket].FirstValue,
- name, "InvalidPacket", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingInvalidPacket,
+ data.Name, "InvalidPacket", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingInvalidPacket].FirstValue,
- name, "InvalidPacket", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingInvalidPacket,
+ data.Name, "InvalidPacket", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingInvalidData].FirstValue,
- name, "InvalidData", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingInvalidData,
+ data.Name, "InvalidData", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingInvalidData].FirstValue,
- name, "InvalidData", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingInvalidData,
+ data.Name, "InvalidData", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsOutgoingUnknown].FirstValue,
- name, "Unknown", "outgoing",
+ data.VirtualNetworkAdapterDropReasonsOutgoingUnknown,
+ data.Name, "Unknown", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
- adapterData[virtualNetworkAdapterDropReasonsIncomingUnknown].FirstValue,
- name, "Unknown", "incoming",
+ data.VirtualNetworkAdapterDropReasonsIncomingUnknown,
+ data.Name, "Unknown", "incoming",
)
}
diff --git a/internal/collector/hyperv/hyperv_virtual_smb.go b/internal/collector/hyperv/hyperv_virtual_smb.go
index e00318f49..f45fce358 100644
--- a/internal/collector/hyperv/hyperv_virtual_smb.go
+++ b/internal/collector/hyperv/hyperv_virtual_smb.go
@@ -18,14 +18,15 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorVirtualSMB Hyper-V Virtual SMB metrics
type collectorVirtualSMB struct {
- perfDataCollectorVirtualSMB *perfdata.Collector
+ perfDataCollectorVirtualSMB *pdh.Collector
+ perfDataObjectVirtualSMB []perfDataCounterValuesVirtualSMB
virtualSMBDirectMappedSections *prometheus.Desc // \Hyper-V Virtual SMB(*)\Direct-Mapped Sections
virtualSMBDirectMappedPages *prometheus.Desc // \Hyper-V Virtual SMB(*)\Direct-Mapped Pages
@@ -46,48 +47,32 @@ type collectorVirtualSMB struct {
virtualSMBReceivedBytes *prometheus.Desc // \Hyper-V Virtual SMB(*)\Received Bytes/sec
}
-const (
- virtualSMBDirectMappedSections = "Direct-Mapped Sections"
- virtualSMBDirectMappedPages = "Direct-Mapped Pages"
- virtualSMBWriteBytesRDMA = "Write Bytes/sec (RDMA)"
- virtualSMBWriteBytes = "Write Bytes/sec"
- virtualSMBReadBytesRDMA = "Read Bytes/sec (RDMA)"
- virtualSMBReadBytes = "Read Bytes/sec"
- virtualSMBFlushRequests = "Flush Requests/sec"
- virtualSMBWriteRequestsRDMA = "Write Requests/sec (RDMA)"
- virtualSMBWriteRequests = "Write Requests/sec"
- virtualSMBReadRequestsRDMA = "Read Requests/sec (RDMA)"
- virtualSMBReadRequests = "Read Requests/sec"
- virtualSMBCurrentPendingRequests = "Current Pending Requests"
- virtualSMBCurrentOpenFileCount = "Current Open File Count"
- virtualSMBTreeConnectCount = "Tree Connect Count"
- virtualSMBRequests = "Requests/sec"
- virtualSMBSentBytes = "Sent Bytes/sec"
- virtualSMBReceivedBytes = "Received Bytes/sec"
-)
+type perfDataCounterValuesVirtualSMB struct {
+ Name string
+
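+ // Counters suffixed with "(RDMA)" cover the portion of virtual SMB traffic carried over SMB Direct.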
+ VirtualSMBDirectMappedSections float64 `perfdata:"Direct-Mapped Sections"`
+ VirtualSMBDirectMappedPages float64 `perfdata:"Direct-Mapped Pages"`
+ VirtualSMBWriteBytesRDMA float64 `perfdata:"Write Bytes/sec (RDMA)"`
+ VirtualSMBWriteBytes float64 `perfdata:"Write Bytes/sec"`
+ VirtualSMBReadBytesRDMA float64 `perfdata:"Read Bytes/sec (RDMA)"`
+ VirtualSMBReadBytes float64 `perfdata:"Read Bytes/sec"`
+ VirtualSMBFlushRequests float64 `perfdata:"Flush Requests/sec"`
+ VirtualSMBWriteRequestsRDMA float64 `perfdata:"Write Requests/sec (RDMA)"`
+ VirtualSMBWriteRequests float64 `perfdata:"Write Requests/sec"`
+ VirtualSMBReadRequestsRDMA float64 `perfdata:"Read Requests/sec (RDMA)"`
+ VirtualSMBReadRequests float64 `perfdata:"Read Requests/sec"`
+ VirtualSMBCurrentPendingRequests float64 `perfdata:"Current Pending Requests"`
+ VirtualSMBCurrentOpenFileCount float64 `perfdata:"Current Open File Count"`
+ VirtualSMBTreeConnectCount float64 `perfdata:"Tree Connect Count"`
+ VirtualSMBRequests float64 `perfdata:"Requests/sec"`
+ VirtualSMBSentBytes float64 `perfdata:"Sent Bytes/sec"`
+ VirtualSMBReceivedBytes float64 `perfdata:"Received Bytes/sec"`
+}
func (c *Collector) buildVirtualSMB() error {
var err error
- c.perfDataCollectorVirtualSMB, err = perfdata.NewCollector("Hyper-V Virtual SMB", perfdata.InstancesAll, []string{
- virtualSMBDirectMappedSections,
- virtualSMBDirectMappedPages,
- virtualSMBWriteBytesRDMA,
- virtualSMBWriteBytes,
- virtualSMBReadBytesRDMA,
- virtualSMBReadBytes,
- virtualSMBFlushRequests,
- virtualSMBWriteRequestsRDMA,
- virtualSMBWriteRequests,
- virtualSMBReadRequestsRDMA,
- virtualSMBReadRequests,
- virtualSMBCurrentPendingRequests,
- virtualSMBCurrentOpenFileCount,
- virtualSMBTreeConnectCount,
- virtualSMBRequests,
- virtualSMBSentBytes,
- virtualSMBReceivedBytes,
- })
+ c.perfDataCollectorVirtualSMB, err = pdh.NewCollector[perfDataCounterValuesVirtualSMB]("Hyper-V Virtual SMB", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual SMB collector: %w", err)
}
@@ -199,129 +184,129 @@ func (c *Collector) buildVirtualSMB() error {
}
func (c *Collector) collectVirtualSMB(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorVirtualSMB.Collect()
+ err := c.perfDataCollectorVirtualSMB.Collect(&c.perfDataObjectVirtualSMB)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Virtual SMB metrics: %w", err)
}
- for name, smbData := range data {
+ for _, data := range c.perfDataObjectVirtualSMB {
ch <- prometheus.MustNewConstMetric(
c.virtualSMBDirectMappedSections,
prometheus.GaugeValue,
- smbData[virtualSMBDirectMappedSections].FirstValue,
- name,
+ data.VirtualSMBDirectMappedSections,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBDirectMappedPages,
prometheus.GaugeValue,
- smbData[virtualSMBDirectMappedPages].FirstValue,
- name,
+ data.VirtualSMBDirectMappedPages,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBWriteBytesRDMA,
prometheus.CounterValue,
- smbData[virtualSMBWriteBytesRDMA].FirstValue,
- name,
+ data.VirtualSMBWriteBytesRDMA,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBWriteBytes,
prometheus.CounterValue,
- smbData[virtualSMBWriteBytes].FirstValue,
- name,
+ data.VirtualSMBWriteBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBReadBytesRDMA,
prometheus.CounterValue,
- smbData[virtualSMBReadBytesRDMA].FirstValue,
- name,
+ data.VirtualSMBReadBytesRDMA,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBReadBytes,
prometheus.CounterValue,
- smbData[virtualSMBReadBytes].FirstValue,
- name,
+ data.VirtualSMBReadBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBFlushRequests,
prometheus.CounterValue,
- smbData[virtualSMBFlushRequests].FirstValue,
- name,
+ data.VirtualSMBFlushRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBWriteRequestsRDMA,
prometheus.CounterValue,
- smbData[virtualSMBWriteRequestsRDMA].FirstValue,
- name,
+ data.VirtualSMBWriteRequestsRDMA,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBWriteRequests,
prometheus.CounterValue,
- smbData[virtualSMBWriteRequests].FirstValue,
- name,
+ data.VirtualSMBWriteRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBReadRequestsRDMA,
prometheus.CounterValue,
- smbData[virtualSMBReadRequestsRDMA].FirstValue,
- name,
+ data.VirtualSMBReadRequestsRDMA,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBReadRequests,
prometheus.CounterValue,
- smbData[virtualSMBReadRequests].FirstValue,
- name,
+ data.VirtualSMBReadRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBCurrentPendingRequests,
prometheus.GaugeValue,
- smbData[virtualSMBCurrentPendingRequests].FirstValue,
- name,
+ data.VirtualSMBCurrentPendingRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBCurrentOpenFileCount,
prometheus.GaugeValue,
- smbData[virtualSMBCurrentOpenFileCount].FirstValue,
- name,
+ data.VirtualSMBCurrentOpenFileCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBTreeConnectCount,
prometheus.GaugeValue,
- smbData[virtualSMBTreeConnectCount].FirstValue,
- name,
+ data.VirtualSMBTreeConnectCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBRequests,
prometheus.CounterValue,
- smbData[virtualSMBRequests].FirstValue,
- name,
+ data.VirtualSMBRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBSentBytes,
prometheus.CounterValue,
- smbData[virtualSMBSentBytes].FirstValue,
- name,
+ data.VirtualSMBSentBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSMBReceivedBytes,
prometheus.CounterValue,
- smbData[virtualSMBReceivedBytes].FirstValue,
- name,
+ data.VirtualSMBReceivedBytes,
+ data.Name,
)
}
diff --git a/internal/collector/hyperv/hyperv_virtual_storage_device.go b/internal/collector/hyperv/hyperv_virtual_storage_device.go
index 4a38e73ab..0f26ec8b6 100644
--- a/internal/collector/hyperv/hyperv_virtual_storage_device.go
+++ b/internal/collector/hyperv/hyperv_virtual_storage_device.go
@@ -18,14 +18,15 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Hyper-V Virtual Storage Device metrics
type collectorVirtualStorageDevice struct {
- perfDataCollectorVirtualStorageDevice *perfdata.Collector
+ perfDataCollectorVirtualStorageDevice *pdh.Collector
+ perfDataObjectVirtualStorageDevice []perfDataCounterValuesVirtualStorageDevice
virtualStorageDeviceErrorCount *prometheus.Desc // \Hyper-V Virtual Storage Device(*)\Error Count
virtualStorageDeviceQueueLength *prometheus.Desc // \Hyper-V Virtual Storage Device(*)\Queue Length
@@ -41,38 +42,27 @@ type collectorVirtualStorageDevice struct {
virtualStorageDeviceIOQuotaReplenishmentRate *prometheus.Desc // \Hyper-V Virtual Storage Device(*)\IO Quota Replenishment Rate
}
-const (
- virtualStorageDeviceErrorCount = "Error Count"
- virtualStorageDeviceQueueLength = "Queue Length"
- virtualStorageDeviceReadBytes = "Read Bytes/sec"
- virtualStorageDeviceReadOperations = "Read Count"
- virtualStorageDeviceWriteBytes = "Write Bytes/sec"
- virtualStorageDeviceWriteOperations = "Write Count"
- virtualStorageDeviceLatency = "Latency"
- virtualStorageDeviceThroughput = "Throughput"
- virtualStorageDeviceNormalizedThroughput = "Normalized Throughput"
- virtualStorageDeviceLowerQueueLength = "Lower Queue Length"
- virtualStorageDeviceLowerLatency = "Lower Latency"
- virtualStorageDeviceIOQuotaReplenishmentRate = "IO Quota Replenishment Rate"
-)
+type perfDataCounterValuesVirtualStorageDevice struct {
+ Name string
+
+ VirtualStorageDeviceErrorCount float64 `perfdata:"Error Count"`
+ VirtualStorageDeviceQueueLength float64 `perfdata:"Queue Length"`
+ VirtualStorageDeviceReadBytes float64 `perfdata:"Read Bytes/sec"`
+ VirtualStorageDeviceReadOperations float64 `perfdata:"Read Count"`
+ VirtualStorageDeviceWriteBytes float64 `perfdata:"Write Bytes/sec"`
+ VirtualStorageDeviceWriteOperations float64 `perfdata:"Write Count"`
+ VirtualStorageDeviceLatency float64 `perfdata:"Latency"`
+ VirtualStorageDeviceThroughput float64 `perfdata:"Throughput"`
+ VirtualStorageDeviceNormalizedThroughput float64 `perfdata:"Normalized Throughput"`
+ VirtualStorageDeviceLowerQueueLength float64 `perfdata:"Lower Queue Length"`
+ VirtualStorageDeviceLowerLatency float64 `perfdata:"Lower Latency"`
+ VirtualStorageDeviceIOQuotaReplenishmentRate float64 `perfdata:"IO Quota Replenishment Rate"`
+}
func (c *Collector) buildVirtualStorageDevice() error {
var err error
- c.perfDataCollectorVirtualStorageDevice, err = perfdata.NewCollector("Hyper-V Virtual Storage Device", perfdata.InstancesAll, []string{
- virtualStorageDeviceErrorCount,
- virtualStorageDeviceQueueLength,
- virtualStorageDeviceReadBytes,
- virtualStorageDeviceReadOperations,
- virtualStorageDeviceWriteBytes,
- virtualStorageDeviceWriteOperations,
- virtualStorageDeviceLatency,
- virtualStorageDeviceThroughput,
- virtualStorageDeviceNormalizedThroughput,
- virtualStorageDeviceLowerQueueLength,
- virtualStorageDeviceLowerLatency,
- virtualStorageDeviceIOQuotaReplenishmentRate,
- })
+ c.perfDataCollectorVirtualStorageDevice, err = pdh.NewCollector[perfDataCounterValuesVirtualStorageDevice]("Hyper-V Virtual Storage Device", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Storage Device collector: %w", err)
}
@@ -154,94 +144,94 @@ func (c *Collector) buildVirtualStorageDevice() error {
}
func (c *Collector) collectVirtualStorageDevice(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorVirtualStorageDevice.Collect()
+ err := c.perfDataCollectorVirtualStorageDevice.Collect(&c.perfDataObjectVirtualStorageDevice)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Virtual Storage Device metrics: %w", err)
}
- for name, device := range data {
+ for _, data := range c.perfDataObjectVirtualStorageDevice {
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceErrorCount,
prometheus.CounterValue,
- device[virtualStorageDeviceErrorCount].FirstValue,
- name,
+ data.VirtualStorageDeviceErrorCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceQueueLength,
prometheus.GaugeValue,
- device[virtualStorageDeviceQueueLength].FirstValue,
- name,
+ data.VirtualStorageDeviceQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceReadBytes,
prometheus.CounterValue,
- device[virtualStorageDeviceReadBytes].FirstValue,
- name,
+ data.VirtualStorageDeviceReadBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceReadOperations,
prometheus.CounterValue,
- device[virtualStorageDeviceReadOperations].FirstValue,
- name,
+ data.VirtualStorageDeviceReadOperations,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceWriteBytes,
prometheus.CounterValue,
- device[virtualStorageDeviceWriteBytes].FirstValue,
- name,
+ data.VirtualStorageDeviceWriteBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceWriteOperations,
prometheus.CounterValue,
- device[virtualStorageDeviceWriteOperations].FirstValue,
- name,
+ data.VirtualStorageDeviceWriteOperations,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceLatency,
prometheus.GaugeValue,
- device[virtualStorageDeviceLatency].FirstValue,
- name,
+ data.VirtualStorageDeviceLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceThroughput,
prometheus.GaugeValue,
- device[virtualStorageDeviceThroughput].FirstValue,
- name,
+ data.VirtualStorageDeviceThroughput,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceNormalizedThroughput,
prometheus.GaugeValue,
- device[virtualStorageDeviceNormalizedThroughput].FirstValue,
- name,
+ data.VirtualStorageDeviceNormalizedThroughput,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceLowerQueueLength,
prometheus.GaugeValue,
- device[virtualStorageDeviceLowerQueueLength].FirstValue,
- name,
+ data.VirtualStorageDeviceLowerQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceLowerLatency,
prometheus.GaugeValue,
- device[virtualStorageDeviceLowerLatency].FirstValue,
- name,
+ data.VirtualStorageDeviceLowerLatency,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualStorageDeviceIOQuotaReplenishmentRate,
prometheus.GaugeValue,
- device[virtualStorageDeviceIOQuotaReplenishmentRate].FirstValue,
- name,
+ data.VirtualStorageDeviceIOQuotaReplenishmentRate,
+ data.Name,
)
}
diff --git a/internal/collector/hyperv/hyperv_virtual_switch.go b/internal/collector/hyperv/hyperv_virtual_switch.go
index 389f6a7c5..9052d7c1b 100644
--- a/internal/collector/hyperv/hyperv_virtual_switch.go
+++ b/internal/collector/hyperv/hyperv_virtual_switch.go
@@ -18,14 +18,16 @@ package hyperv
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// collectorVirtualSwitch Hyper-V Virtual Switch metrics
type collectorVirtualSwitch struct {
- perfDataCollectorVirtualSwitch *perfdata.Collector
+ perfDataCollectorVirtualSwitch *pdh.Collector
+ perfDataObjectVirtualSwitch []perfDataCounterValuesVirtualSwitch
+
virtualSwitchBroadcastPacketsReceived *prometheus.Desc // \Hyper-V Virtual Switch(*)\Broadcast Packets Received/sec
virtualSwitchBroadcastPacketsSent *prometheus.Desc // \Hyper-V Virtual Switch(*)\Broadcast Packets Sent/sec
virtualSwitchBytes *prometheus.Desc // \Hyper-V Virtual Switch(*)\Bytes/sec
@@ -49,56 +51,36 @@ type collectorVirtualSwitch struct {
virtualSwitchPurgedMacAddresses *prometheus.Desc // \Hyper-V Virtual Switch(*)\Purged Mac Addresses
}
-const (
- virtualSwitchBroadcastPacketsReceived = "Broadcast Packets Received/sec"
- virtualSwitchBroadcastPacketsSent = "Broadcast Packets Sent/sec"
- virtualSwitchBytes = "Bytes/sec"
- virtualSwitchBytesReceived = "Bytes Received/sec"
- virtualSwitchBytesSent = "Bytes Sent/sec"
- virtualSwitchDirectedPacketsReceived = "Directed Packets Received/sec"
- virtualSwitchDirectedPacketsSent = "Directed Packets Sent/sec"
- virtualSwitchDroppedPacketsIncoming = "Dropped Packets Incoming/sec"
- virtualSwitchDroppedPacketsOutgoing = "Dropped Packets Outgoing/sec"
- virtualSwitchExtensionsDroppedPacketsIncoming = "Extensions Dropped Packets Incoming/sec"
- virtualSwitchExtensionsDroppedPacketsOutgoing = "Extensions Dropped Packets Outgoing/sec"
- virtualSwitchLearnedMacAddresses = "Learned Mac Addresses"
- virtualSwitchMulticastPacketsReceived = "Multicast Packets Received/sec"
- virtualSwitchMulticastPacketsSent = "Multicast Packets Sent/sec"
- virtualSwitchNumberOfSendChannelMoves = "Number of Send Channel Moves/sec"
- virtualSwitchNumberOfVMQMoves = "Number of VMQ Moves/sec"
- virtualSwitchPacketsFlooded = "Packets Flooded"
- virtualSwitchPackets = "Packets/sec"
- virtualSwitchPacketsReceived = "Packets Received/sec"
- virtualSwitchPacketsSent = "Packets Sent/sec"
- virtualSwitchPurgedMacAddresses = "Purged Mac Addresses"
-)
+type perfDataCounterValuesVirtualSwitch struct {
+ Name string
+
+ VirtualSwitchBroadcastPacketsReceived float64 `perfdata:"Broadcast Packets Received/sec"`
+ VirtualSwitchBroadcastPacketsSent float64 `perfdata:"Broadcast Packets Sent/sec"`
+ VirtualSwitchBytes float64 `perfdata:"Bytes/sec"`
+ VirtualSwitchBytesReceived float64 `perfdata:"Bytes Received/sec"`
+ VirtualSwitchBytesSent float64 `perfdata:"Bytes Sent/sec"`
+ VirtualSwitchDirectedPacketsReceived float64 `perfdata:"Directed Packets Received/sec"`
+ VirtualSwitchDirectedPacketsSent float64 `perfdata:"Directed Packets Sent/sec"`
+ VirtualSwitchDroppedPacketsIncoming float64 `perfdata:"Dropped Packets Incoming/sec"`
+ VirtualSwitchDroppedPacketsOutgoing float64 `perfdata:"Dropped Packets Outgoing/sec"`
+ VirtualSwitchExtensionsDroppedPacketsIncoming float64 `perfdata:"Extensions Dropped Packets Incoming/sec"`
+ VirtualSwitchExtensionsDroppedPacketsOutgoing float64 `perfdata:"Extensions Dropped Packets Outgoing/sec"`
+ VirtualSwitchLearnedMacAddresses float64 `perfdata:"Learned Mac Addresses"`
+ VirtualSwitchMulticastPacketsReceived float64 `perfdata:"Multicast Packets Received/sec"`
+ VirtualSwitchMulticastPacketsSent float64 `perfdata:"Multicast Packets Sent/sec"`
+ VirtualSwitchNumberOfSendChannelMoves float64 `perfdata:"Number of Send Channel Moves/sec"`
+ VirtualSwitchNumberOfVMQMoves float64 `perfdata:"Number of VMQ Moves/sec"`
+ VirtualSwitchPacketsFlooded float64 `perfdata:"Packets Flooded"`
+ VirtualSwitchPackets float64 `perfdata:"Packets/sec"`
+ VirtualSwitchPacketsReceived float64 `perfdata:"Packets Received/sec"`
+ VirtualSwitchPacketsSent float64 `perfdata:"Packets Sent/sec"`
+ VirtualSwitchPurgedMacAddresses float64 `perfdata:"Purged Mac Addresses"`
+}
func (c *Collector) buildVirtualSwitch() error {
var err error
- c.perfDataCollectorVirtualSwitch, err = perfdata.NewCollector("Hyper-V Virtual Switch", perfdata.InstancesAll, []string{
- virtualSwitchBroadcastPacketsReceived,
- virtualSwitchBroadcastPacketsSent,
- virtualSwitchBytes,
- virtualSwitchBytesReceived,
- virtualSwitchBytesSent,
- virtualSwitchDirectedPacketsReceived,
- virtualSwitchDirectedPacketsSent,
- virtualSwitchDroppedPacketsIncoming,
- virtualSwitchDroppedPacketsOutgoing,
- virtualSwitchExtensionsDroppedPacketsIncoming,
- virtualSwitchExtensionsDroppedPacketsOutgoing,
- virtualSwitchLearnedMacAddresses,
- virtualSwitchMulticastPacketsReceived,
- virtualSwitchMulticastPacketsSent,
- virtualSwitchNumberOfSendChannelMoves,
- virtualSwitchNumberOfVMQMoves,
- virtualSwitchPacketsFlooded,
- virtualSwitchPackets,
- virtualSwitchPacketsReceived,
- virtualSwitchPacketsSent,
- virtualSwitchPurgedMacAddresses,
- })
+ c.perfDataCollectorVirtualSwitch, err = pdh.NewCollector[perfDataCounterValuesVirtualSwitch]("Hyper-V Virtual Switch", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Switch collector: %w", err)
}
@@ -234,147 +216,147 @@ func (c *Collector) buildVirtualSwitch() error {
}
func (c *Collector) collectVirtualSwitch(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollectorVirtualSwitch.Collect()
+ err := c.perfDataCollectorVirtualSwitch.Collect(&c.perfDataObjectVirtualSwitch)
if err != nil {
return fmt.Errorf("failed to collect Hyper-V Virtual Switch metrics: %w", err)
}
- for name, switchData := range data {
+ for _, data := range c.perfDataObjectVirtualSwitch {
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchBroadcastPacketsReceived,
prometheus.CounterValue,
- switchData[virtualSwitchBroadcastPacketsReceived].FirstValue,
- name,
+ data.VirtualSwitchBroadcastPacketsReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchBroadcastPacketsSent,
prometheus.CounterValue,
- switchData[virtualSwitchBroadcastPacketsSent].FirstValue,
- name,
+ data.VirtualSwitchBroadcastPacketsSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchBytes,
prometheus.CounterValue,
- switchData[virtualSwitchBytes].FirstValue,
- name,
+ data.VirtualSwitchBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchBytesReceived,
prometheus.CounterValue,
- switchData[virtualSwitchBytesReceived].FirstValue,
- name,
+ data.VirtualSwitchBytesReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchBytesSent,
prometheus.CounterValue,
- switchData[virtualSwitchBytesSent].FirstValue,
- name,
+ data.VirtualSwitchBytesSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchDirectedPacketsReceived,
prometheus.CounterValue,
- switchData[virtualSwitchDirectedPacketsReceived].FirstValue,
- name,
+ data.VirtualSwitchDirectedPacketsReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchDirectedPacketsSent,
prometheus.CounterValue,
- switchData[virtualSwitchDirectedPacketsSent].FirstValue,
- name,
+ data.VirtualSwitchDirectedPacketsSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchDroppedPacketsIncoming,
prometheus.CounterValue,
- switchData[virtualSwitchDroppedPacketsIncoming].FirstValue,
- name,
+ data.VirtualSwitchDroppedPacketsIncoming,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchDroppedPacketsOutgoing,
prometheus.CounterValue,
- switchData[virtualSwitchDroppedPacketsOutgoing].FirstValue,
- name,
+ data.VirtualSwitchDroppedPacketsOutgoing,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchExtensionsDroppedPacketsIncoming,
prometheus.CounterValue,
- switchData[virtualSwitchExtensionsDroppedPacketsIncoming].FirstValue,
- name,
+ data.VirtualSwitchExtensionsDroppedPacketsIncoming,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchExtensionsDroppedPacketsOutgoing,
prometheus.CounterValue,
- switchData[virtualSwitchExtensionsDroppedPacketsOutgoing].FirstValue,
- name,
+ data.VirtualSwitchExtensionsDroppedPacketsOutgoing,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchLearnedMacAddresses,
prometheus.CounterValue,
- switchData[virtualSwitchLearnedMacAddresses].FirstValue,
- name,
+ data.VirtualSwitchLearnedMacAddresses,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchMulticastPacketsReceived,
prometheus.CounterValue,
- switchData[virtualSwitchMulticastPacketsReceived].FirstValue,
- name,
+ data.VirtualSwitchMulticastPacketsReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchMulticastPacketsSent,
prometheus.CounterValue,
- switchData[virtualSwitchMulticastPacketsSent].FirstValue,
- name,
+ data.VirtualSwitchMulticastPacketsSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchNumberOfSendChannelMoves,
prometheus.CounterValue,
- switchData[virtualSwitchNumberOfSendChannelMoves].FirstValue,
- name,
+ data.VirtualSwitchNumberOfSendChannelMoves,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchNumberOfVMQMoves,
prometheus.CounterValue,
- switchData[virtualSwitchNumberOfVMQMoves].FirstValue,
- name,
+ data.VirtualSwitchNumberOfVMQMoves,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchPacketsFlooded,
prometheus.CounterValue,
- switchData[virtualSwitchPacketsFlooded].FirstValue,
- name,
+ data.VirtualSwitchPacketsFlooded,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchPackets,
prometheus.CounterValue,
- switchData[virtualSwitchPackets].FirstValue,
- name,
+ data.VirtualSwitchPackets,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchPacketsReceived,
prometheus.CounterValue,
- switchData[virtualSwitchPacketsReceived].FirstValue,
- name,
+ data.VirtualSwitchPacketsReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchPacketsSent,
prometheus.CounterValue,
- switchData[virtualSwitchPacketsSent].FirstValue,
- name,
+ data.VirtualSwitchPacketsSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchPurgedMacAddresses,
prometheus.CounterValue,
- switchData[virtualSwitchPurgedMacAddresses].FirstValue,
- name,
+ data.VirtualSwitchPurgedMacAddresses,
+ data.Name,
)
}
diff --git a/internal/collector/iis/iis.go b/internal/collector/iis/iis.go
index b1c59bfa0..9e66b4ca3 100644
--- a/internal/collector/iis/iis.go
+++ b/internal/collector/iis/iis.go
@@ -19,14 +19,12 @@ import (
"errors"
"fmt"
"log/slog"
- "maps"
"regexp"
"slices"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
@@ -270,26 +268,36 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
return errors.Join(errs...)
}
+type collectorName interface {
+ GetName() string
+}
+
// deduplicateIISNames deduplicate IIS site names from various IIS perflib objects.
//
// E.G. Given the following list of site names, "Site_B" would be
// discarded, and "Site_B#2" would be kept and presented as "Site_B" in the
// Collector metrics.
// [ "Site_A", "Site_B", "Site_C", "Site_B#2" ].
-func deduplicateIISNames(counterValues map[string]map[string]perfdata.CounterValue) {
- services := slices.Collect(maps.Keys(counterValues))
+func deduplicateIISNames[T collectorName](counterValues []T) {
+ indexes := make(map[string]int)
// Ensure IIS entry with the highest suffix occurs last
- slices.Sort(services)
+ slices.SortFunc(counterValues, func(a, b T) int {
+ return strings.Compare(a.GetName(), b.GetName())
+ })
// Use map to deduplicate IIS entries
- for _, entry := range services {
- name := strings.Split(entry, "#")[0]
- if name == entry {
+ for index, counterValue := range counterValues {
+ name := strings.Split(counterValue.GetName(), "#")[0]
+	if name == counterValue.GetName() {
+		// Remember where the un-suffixed entry lives so a suffixed
+		// duplicate can supersede it later.
+		indexes[name] = index
+
continue
}
- counterValues[name] = counterValues[entry]
- delete(counterValues, entry)
+		// A duplicate instance such as "Site_B#2" overwrites the values
+		// recorded for the first entry with the same base name.
+		if originalIndex, ok := indexes[name]; ok {
+			counterValues[originalIndex] = counterValue
+		}
}
}
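
For illustration, any per-instance struct that satisfies the collectorName constraint above can be passed to this helper. The type below is hypothetical, lives in the same iis package, and only mirrors the shape of the counter structs defined later in this diff.

type exampleSiteValues struct {
	Name     string
	Requests float64 `perfdata:"Total HTTP Requests Served"`
}

func (e exampleSiteValues) GetName() string { return e.Name }

func exampleDeduplicate() {
	sites := []exampleSiteValues{
		{Name: "Site_A"}, {Name: "Site_B"}, {Name: "Site_C"}, {Name: "Site_B#2"},
	}

	// After sorting, "Site_B#2" follows "Site_B", and its counter values
	// supersede the entry recorded for the un-suffixed name.
	deduplicateIISNames(sites)
}
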
diff --git a/internal/collector/iis/iis_app_pool_was.go b/internal/collector/iis/iis_app_pool_was.go
index 686c9c779..48729f1a6 100644
--- a/internal/collector/iis/iis_app_pool_was.go
+++ b/internal/collector/iis/iis_app_pool_was.go
@@ -18,13 +18,14 @@ package iis
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorAppPoolWAS struct {
- perfDataCollectorAppPoolWAS *perfdata.Collector
+ perfDataCollectorAppPoolWAS *pdh.Collector
+ perfDataObjectAppPoolWAS []perfDataCounterValuesAppPoolWAS
currentApplicationPoolState *prometheus.Desc
currentApplicationPoolUptime *prometheus.Desc
@@ -40,22 +41,27 @@ type collectorAppPoolWAS struct {
totalWorkerProcessShutdownFailures *prometheus.Desc
totalWorkerProcessStartupFailures *prometheus.Desc
}
+type perfDataCounterValuesAppPoolWAS struct {
+ Name string
-const (
- CurrentApplicationPoolState = "Current Application Pool State"
- CurrentApplicationPoolUptime = "Current Application Pool Uptime"
- CurrentWorkerProcesses = "Current Worker Processes"
- MaximumWorkerProcesses = "Maximum Worker Processes"
- RecentWorkerProcessFailures = "Recent Worker Process Failures"
- TimeSinceLastWorkerProcessFailure = "Time Since Last Worker Process Failure"
- TotalApplicationPoolRecycles = "Total Application Pool Recycles"
- TotalApplicationPoolUptime = "Total Application Pool Uptime"
- TotalWorkerProcessesCreated = "Total Worker Processes Created"
- TotalWorkerProcessFailures = "Total Worker Process Failures"
- TotalWorkerProcessPingFailures = "Total Worker Process Ping Failures"
- TotalWorkerProcessShutdownFailures = "Total Worker Process Shutdown Failures"
- TotalWorkerProcessStartupFailures = "Total Worker Process Startup Failures"
-)
+ CurrentApplicationPoolState float64 `perfdata:"Current Application Pool State"`
+ CurrentApplicationPoolUptime float64 `perfdata:"Current Application Pool Uptime"`
+ CurrentWorkerProcesses float64 `perfdata:"Current Worker Processes"`
+ MaximumWorkerProcesses float64 `perfdata:"Maximum Worker Processes"`
+ RecentWorkerProcessFailures float64 `perfdata:"Recent Worker Process Failures"`
+ TimeSinceLastWorkerProcessFailure float64 `perfdata:"Time Since Last Worker Process Failure"`
+ TotalApplicationPoolRecycles float64 `perfdata:"Total Application Pool Recycles"`
+ TotalApplicationPoolUptime float64 `perfdata:"Total Application Pool Uptime"`
+ TotalWorkerProcessesCreated float64 `perfdata:"Total Worker Processes Created"`
+ TotalWorkerProcessFailures float64 `perfdata:"Total Worker Process Failures"`
+ TotalWorkerProcessPingFailures float64 `perfdata:"Total Worker Process Ping Failures"`
+ TotalWorkerProcessShutdownFailures float64 `perfdata:"Total Worker Process Shutdown Failures"`
+ TotalWorkerProcessStartupFailures float64 `perfdata:"Total Worker Process Startup Failures"`
+}
+
+func (p perfDataCounterValuesAppPoolWAS) GetName() string {
+ return p.Name
+}
//nolint:gochecknoglobals
var applicationStates = map[uint32]string{
@@ -71,21 +77,7 @@ var applicationStates = map[uint32]string{
func (c *Collector) buildAppPoolWAS() error {
var err error
- c.perfDataCollectorAppPoolWAS, err = perfdata.NewCollector("APP_POOL_WAS", perfdata.InstancesAll, []string{
- CurrentApplicationPoolState,
- CurrentApplicationPoolUptime,
- CurrentWorkerProcesses,
- MaximumWorkerProcesses,
- RecentWorkerProcessFailures,
- TimeSinceLastWorkerProcessFailure,
- TotalApplicationPoolRecycles,
- TotalApplicationPoolUptime,
- TotalWorkerProcessesCreated,
- TotalWorkerProcessFailures,
- TotalWorkerProcessPingFailures,
- TotalWorkerProcessShutdownFailures,
- TotalWorkerProcessStartupFailures,
- })
+ c.perfDataCollectorAppPoolWAS, err = pdh.NewCollector[perfDataCounterValuesAppPoolWAS]("APP_POOL_WAS", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create APP_POOL_WAS collector: %w", err)
}
@@ -174,21 +166,21 @@ func (c *Collector) buildAppPoolWAS() error {
}
func (c *Collector) collectAppPoolWAS(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorAppPoolWAS.Collect()
+ err := c.perfDataCollectorAppPoolWAS.Collect(&c.perfDataObjectAppPoolWAS)
if err != nil {
return fmt.Errorf("failed to collect APP_POOL_WAS metrics: %w", err)
}
- deduplicateIISNames(perfData)
+ deduplicateIISNames(c.perfDataObjectAppPoolWAS)
- for name, app := range perfData {
- if c.config.AppExclude.MatchString(name) || !c.config.AppInclude.MatchString(name) {
+ for _, data := range c.perfDataObjectAppPoolWAS {
+ if c.config.AppExclude.MatchString(data.Name) || !c.config.AppInclude.MatchString(data.Name) {
continue
}
for key, label := range applicationStates {
isCurrentState := 0.0
- if key == uint32(app[CurrentApplicationPoolState].FirstValue) {
+ if key == uint32(data.CurrentApplicationPoolState) {
isCurrentState = 1.0
}
@@ -196,7 +188,7 @@ func (c *Collector) collectAppPoolWAS(ch chan<- prometheus.Metric) error {
c.currentApplicationPoolState,
prometheus.GaugeValue,
isCurrentState,
- name,
+ data.Name,
label,
)
}
@@ -204,74 +196,74 @@ func (c *Collector) collectAppPoolWAS(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.currentApplicationPoolUptime,
prometheus.GaugeValue,
- app[CurrentApplicationPoolUptime].FirstValue,
- name,
+ data.CurrentApplicationPoolUptime,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.currentWorkerProcesses,
prometheus.GaugeValue,
- app[CurrentWorkerProcesses].FirstValue,
- name,
+ data.CurrentWorkerProcesses,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.maximumWorkerProcesses,
prometheus.GaugeValue,
- app[MaximumWorkerProcesses].FirstValue,
- name,
+ data.MaximumWorkerProcesses,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.recentWorkerProcessFailures,
prometheus.GaugeValue,
- app[RecentWorkerProcessFailures].FirstValue,
- name,
+ data.RecentWorkerProcessFailures,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.timeSinceLastWorkerProcessFailure,
prometheus.GaugeValue,
- app[TimeSinceLastWorkerProcessFailure].FirstValue,
- name,
+ data.TimeSinceLastWorkerProcessFailure,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalApplicationPoolRecycles,
prometheus.CounterValue,
- app[TotalApplicationPoolRecycles].FirstValue,
- name,
+ data.TotalApplicationPoolRecycles,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalApplicationPoolUptime,
prometheus.CounterValue,
- app[TotalApplicationPoolUptime].FirstValue,
- name,
+ data.TotalApplicationPoolUptime,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessesCreated,
prometheus.CounterValue,
- app[TotalWorkerProcessesCreated].FirstValue,
- name,
+ data.TotalWorkerProcessesCreated,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessFailures,
prometheus.CounterValue,
- app[TotalWorkerProcessFailures].FirstValue,
- name,
+ data.TotalWorkerProcessFailures,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessPingFailures,
prometheus.CounterValue,
- app[TotalWorkerProcessPingFailures].FirstValue,
- name,
+ data.TotalWorkerProcessPingFailures,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessShutdownFailures,
prometheus.CounterValue,
- app[TotalWorkerProcessShutdownFailures].FirstValue,
- name,
+ data.TotalWorkerProcessShutdownFailures,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessStartupFailures,
prometheus.CounterValue,
- app[TotalWorkerProcessStartupFailures].FirstValue,
- name,
+ data.TotalWorkerProcessStartupFailures,
+ data.Name,
)
}
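
For reference, the per-state gauges emitted above reduce to a one-hot mapping: the counter reports the pool state as a number, and one series per known state is emitted, valued 1 for the current state and 0 otherwise. The helper name below is hypothetical; applicationStates is the map declared earlier in this file.

func exampleStateSeries(current uint32) map[string]float64 {
	series := make(map[string]float64, len(applicationStates))

	for key, label := range applicationStates {
		value := 0.0
		if key == current {
			value = 1.0
		}

		series[label] = value
	}

	return series
}
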
diff --git a/internal/collector/iis/iis_w3svc_w3wp.go b/internal/collector/iis/iis_w3svc_w3wp.go
index 41513a887..0df7ba398 100644
--- a/internal/collector/iis/iis_w3svc_w3wp.go
+++ b/internal/collector/iis/iis_w3svc_w3wp.go
@@ -20,13 +20,16 @@ import (
"regexp"
"strings"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorW3SVCW3WP struct {
- w3SVCW3WPPerfDataCollector *perfdata.Collector
+ w3SVCW3WPPerfDataCollector *pdh.Collector
+ w3SVCW3WPPerfDataCollectorV8 *pdh.Collector
+ perfDataObjectW3SVCW3WP []perfDataCounterValuesW3SVCW3WP
+ perfDataObjectW3SVCW3WPV8 []perfDataCounterValuesW3SVCW3WPV8
// W3SVC_W3WP
w3SVCW3WPThreads *prometheus.Desc
@@ -78,114 +81,87 @@ type collectorW3SVCW3WP struct {
var workerProcessNameExtractor = regexp.MustCompile(`^(\d+)_(.+)$`)
-const (
- w3SVCW3WPThreads = "Active Threads Count"
- w3SVCW3WPMaximumThreads = "Maximum Threads Count"
-
- w3SVCW3WPRequestsTotal = "Total HTTP Requests Served"
- w3SVCW3WPRequestsActive = "Active Requests"
-
- w3SVCW3WPActiveFlushedEntries = "Active Flushed Entries"
-
- w3SVCW3WPCurrentFileCacheMemoryUsage = "Current File Cache Memory Usage"
- w3SVCW3WPMaximumFileCacheMemoryUsage = "Maximum File Cache Memory Usage"
- w3SVCW3WPFileCacheFlushesTotal = "File Cache Flushes"
- w3SVCW3WPFileCacheHitsTotal = "File Cache Hits"
- w3SVCW3WPFileCacheMissesTotal = "File Cache Misses"
- w3SVCW3WPFilesCached = "Current Files Cached"
- w3SVCW3WPFilesCachedTotal = "Total Files Cached"
- w3SVCW3WPFilesFlushedTotal = "Total Flushed Files"
-
- w3SVCW3WPURICacheFlushesTotal = "Total Flushed URIs"
- w3SVCW3WPURICacheHitsTotal = "URI Cache Hits"
- w3SVCW3WPURICacheMissesTotal = "URI Cache Misses"
- w3SVCW3WPURIsCached = "Current URIs Cached"
- w3SVCW3WPURIsCachedTotal = "Total URIs Cached"
- w3SVCW3WPURIsFlushedTotal = "Total Flushed URIs"
-
- w3SVCW3WPMetaDataCacheHits = "Metadata Cache Hits"
- w3SVCW3WPMetaDataCacheMisses = "Metadata Cache Misses"
- w3SVCW3WPMetadataCached = "Current Metadata Cached"
- w3SVCW3WPMetadataCacheFlushes = "Metadata Cache Flushes"
- w3SVCW3WPMetadataCachedTotal = "Total Metadata Cached"
- w3SVCW3WPMetadataFlushedTotal = "Total Flushed Metadata"
-
- w3SVCW3WPOutputCacheActiveFlushedItems = "Output Cache Current Flushed Items"
- w3SVCW3WPOutputCacheItems = "Output Cache Current Items"
- w3SVCW3WPOutputCacheMemoryUsage = "Output Cache Current Memory Usage"
- w3SVCW3WPOutputCacheHitsTotal = "Output Cache Total Hits"
- w3SVCW3WPOutputCacheMissesTotal = "Output Cache Total Misses"
- w3SVCW3WPOutputCacheFlushedItemsTotal = "Output Cache Total Flushed Items"
- w3SVCW3WPOutputCacheFlushesTotal = "Output Cache Total Flushes"
+type perfDataCounterValuesW3SVCW3WP struct {
+ Name string
+
+ W3SVCW3WPThreads float64 `perfdata:"Active Threads Count"`
+ W3SVCW3WPMaximumThreads float64 `perfdata:"Maximum Threads Count"`
+
+ W3SVCW3WPRequestsTotal float64 `perfdata:"Total HTTP Requests Served"`
+ W3SVCW3WPRequestsActive float64 `perfdata:"Active Requests"`
+
+ W3SVCW3WPActiveFlushedEntries float64 `perfdata:"Active Flushed Entries"`
+
+ W3SVCW3WPCurrentFileCacheMemoryUsage float64 `perfdata:"Current File Cache Memory Usage"`
+ W3SVCW3WPMaximumFileCacheMemoryUsage float64 `perfdata:"Maximum File Cache Memory Usage"`
+ W3SVCW3WPFileCacheFlushesTotal float64 `perfdata:"File Cache Flushes"`
+ W3SVCW3WPFileCacheHitsTotal float64 `perfdata:"File Cache Hits"`
+ W3SVCW3WPFileCacheMissesTotal float64 `perfdata:"File Cache Misses"`
+ W3SVCW3WPFilesCached float64 `perfdata:"Current Files Cached"`
+ W3SVCW3WPFilesCachedTotal float64 `perfdata:"Total Files Cached"`
+ W3SVCW3WPFilesFlushedTotal float64 `perfdata:"Total Flushed Files"`
+
+ W3SVCW3WPURICacheFlushesTotal float64 `perfdata:"Total Flushed URIs"`
+ W3SVCW3WPURICacheHitsTotal float64 `perfdata:"URI Cache Hits"`
+ W3SVCW3WPURICacheMissesTotal float64 `perfdata:"URI Cache Misses"`
+ W3SVCW3WPURIsCached float64 `perfdata:"Current URIs Cached"`
+ W3SVCW3WPURIsCachedTotal float64 `perfdata:"Total URIs Cached"`
+ W3SVCW3WPURIsFlushedTotal float64 `perfdata:"Total Flushed URIs"`
+
+ W3SVCW3WPMetaDataCacheHits float64 `perfdata:"Metadata Cache Hits"`
+ W3SVCW3WPMetaDataCacheMisses float64 `perfdata:"Metadata Cache Misses"`
+ W3SVCW3WPMetadataCached float64 `perfdata:"Current Metadata Cached"`
+ W3SVCW3WPMetadataCacheFlushes float64 `perfdata:"Metadata Cache Flushes"`
+ W3SVCW3WPMetadataCachedTotal float64 `perfdata:"Total Metadata Cached"`
+ W3SVCW3WPMetadataFlushedTotal float64 `perfdata:"Total Flushed Metadata"`
+
+ W3SVCW3WPOutputCacheActiveFlushedItems float64 `perfdata:"Output Cache Current Flushed Items"`
+ W3SVCW3WPOutputCacheItems float64 `perfdata:"Output Cache Current Items"`
+ W3SVCW3WPOutputCacheMemoryUsage float64 `perfdata:"Output Cache Current Memory Usage"`
+ W3SVCW3WPOutputCacheHitsTotal float64 `perfdata:"Output Cache Total Hits"`
+ W3SVCW3WPOutputCacheMissesTotal float64 `perfdata:"Output Cache Total Misses"`
+ W3SVCW3WPOutputCacheFlushedItemsTotal float64 `perfdata:"Output Cache Total Flushed Items"`
+ W3SVCW3WPOutputCacheFlushesTotal float64 `perfdata:"Output Cache Total Flushes"`
+}
- // IIS8
- w3SVCW3WPRequestErrors500 = "% 500 HTTP Response Sent"
- w3SVCW3WPRequestErrors404 = "% 404 HTTP Response Sent"
- w3SVCW3WPRequestErrors403 = "% 403 HTTP Response Sent"
- w3SVCW3WPRequestErrors401 = "% 401 HTTP Response Sent"
-
- w3SVCW3WPWebSocketRequestsActive = "WebSocket Active Requests"
- w3SVCW3WPWebSocketConnectionAttempts = "WebSocket Connection Attempts / Sec"
- w3SVCW3WPWebSocketConnectionsAccepted = "WebSocket Connections Accepted / Sec"
- w3SVCW3WPWebSocketConnectionsRejected = "WebSocket Connections Rejected / Sec"
-)
+func (p perfDataCounterValuesW3SVCW3WP) GetName() string {
+ return p.Name
+}
-func (c *Collector) buildW3SVCW3WP() error {
- counters := []string{
- w3SVCW3WPThreads,
- w3SVCW3WPMaximumThreads,
- w3SVCW3WPRequestsTotal,
- w3SVCW3WPRequestsActive,
- w3SVCW3WPActiveFlushedEntries,
- w3SVCW3WPCurrentFileCacheMemoryUsage,
- w3SVCW3WPMaximumFileCacheMemoryUsage,
- w3SVCW3WPFileCacheFlushesTotal,
- w3SVCW3WPFileCacheHitsTotal,
- w3SVCW3WPFileCacheMissesTotal,
- w3SVCW3WPFilesCached,
- w3SVCW3WPFilesCachedTotal,
- w3SVCW3WPFilesFlushedTotal,
- w3SVCW3WPURICacheFlushesTotal,
- w3SVCW3WPURICacheHitsTotal,
- w3SVCW3WPURICacheMissesTotal,
- w3SVCW3WPURIsCached,
- w3SVCW3WPURIsCachedTotal,
- w3SVCW3WPURIsFlushedTotal,
- w3SVCW3WPMetaDataCacheHits,
- w3SVCW3WPMetaDataCacheMisses,
- w3SVCW3WPMetadataCached,
- w3SVCW3WPMetadataCacheFlushes,
- w3SVCW3WPMetadataCachedTotal,
- w3SVCW3WPMetadataFlushedTotal,
- w3SVCW3WPOutputCacheActiveFlushedItems,
- w3SVCW3WPOutputCacheItems,
- w3SVCW3WPOutputCacheMemoryUsage,
- w3SVCW3WPOutputCacheHitsTotal,
- w3SVCW3WPOutputCacheMissesTotal,
- w3SVCW3WPOutputCacheFlushedItemsTotal,
- w3SVCW3WPOutputCacheFlushesTotal,
- }
+type perfDataCounterValuesW3SVCW3WPV8 struct {
+ Name string
- if c.iisVersion.major >= 8 {
- counters = append(counters, []string{
- w3SVCW3WPRequestErrors500,
- w3SVCW3WPRequestErrors404,
- w3SVCW3WPRequestErrors403,
- w3SVCW3WPRequestErrors401,
- w3SVCW3WPWebSocketRequestsActive,
- w3SVCW3WPWebSocketConnectionAttempts,
- w3SVCW3WPWebSocketConnectionsAccepted,
- w3SVCW3WPWebSocketConnectionsRejected,
- }...)
- }
+ // IIS8
+ W3SVCW3WPRequestErrors500 float64 `perfdata:"% 500 HTTP Response Sent"`
+ W3SVCW3WPRequestErrors404 float64 `perfdata:"% 404 HTTP Response Sent"`
+ W3SVCW3WPRequestErrors403 float64 `perfdata:"% 403 HTTP Response Sent"`
+ W3SVCW3WPRequestErrors401 float64 `perfdata:"% 401 HTTP Response Sent"`
+
+ W3SVCW3WPWebSocketRequestsActive float64 `perfdata:"WebSocket Active Requests"`
+ W3SVCW3WPWebSocketConnectionAttempts float64 `perfdata:"WebSocket Connection Attempts / Sec"`
+ W3SVCW3WPWebSocketConnectionsAccepted float64 `perfdata:"WebSocket Connections Accepted / Sec"`
+ W3SVCW3WPWebSocketConnectionsRejected float64 `perfdata:"WebSocket Connections Rejected / Sec"`
+}
+
+func (p perfDataCounterValuesW3SVCW3WPV8) GetName() string {
+ return p.Name
+}
+func (c *Collector) buildW3SVCW3WP() error {
var err error
- c.w3SVCW3WPPerfDataCollector, err = perfdata.NewCollector("W3SVC_W3WP", perfdata.InstancesAll, counters)
+ c.w3SVCW3WPPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WP]("W3SVC_W3WP", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
}
+ if c.iisVersion.major >= 8 {
+ c.w3SVCW3WPPerfDataCollectorV8, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WPV8]("W3SVC_W3WP", pdh.InstancesAll)
+ if err != nil {
+ return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
+ }
+ }
+
// W3SVC_W3WP
c.w3SVCW3WPThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_threads"),
@@ -415,22 +391,128 @@ func (c *Collector) buildW3SVCW3WP() error {
}
func (c *Collector) collectW3SVCW3WP(ch chan<- prometheus.Metric) error {
- perfData, err := c.w3SVCW3WPPerfDataCollector.Collect()
+ if err := c.collectW3SVCW3WPv7(ch); err != nil {
+ return err
+ }
+
+ if c.iisVersion.major >= 8 {
+ if err := c.collectW3SVCW3WPv8(ch); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *Collector) collectW3SVCW3WPv8(ch chan<- prometheus.Metric) error {
+	err := c.w3SVCW3WPPerfDataCollectorV8.Collect(&c.perfDataObjectW3SVCW3WPV8)
if err != nil {
return fmt.Errorf("failed to collect APP_POOL_WAS metrics: %w", err)
}
- deduplicateIISNames(perfData)
+ deduplicateIISNames(c.perfDataObjectW3SVCW3WPV8)
- for name, app := range perfData {
- if c.config.AppExclude.MatchString(name) || !c.config.AppInclude.MatchString(name) {
+ for _, data := range c.perfDataObjectW3SVCW3WPV8 {
+ if c.config.AppExclude.MatchString(data.Name) || !c.config.AppInclude.MatchString(data.Name) {
continue
}
		// Extract the apppool name from the format <PID>_<NAME>
- pid := workerProcessNameExtractor.ReplaceAllString(name, "$1")
+ pid := workerProcessNameExtractor.ReplaceAllString(data.Name, "$1")
- name := workerProcessNameExtractor.ReplaceAllString(name, "$2")
+ name := workerProcessNameExtractor.ReplaceAllString(data.Name, "$2")
+ if name == "" || c.config.AppExclude.MatchString(name) ||
+ !c.config.AppInclude.MatchString(name) {
+ continue
+ }
+
+		// Duplicate instances are suffixed with "#" and an index number. These should be ignored.
+ if strings.Contains(name, "#") {
+ continue
+ }
+
+ ch <- prometheus.MustNewConstMetric(
+ c.w3SVCW3WPRequestErrorsTotal,
+ prometheus.CounterValue,
+ data.W3SVCW3WPRequestErrors401,
+ name,
+ pid,
+ "401",
+ )
+ ch <- prometheus.MustNewConstMetric(
+ c.w3SVCW3WPRequestErrorsTotal,
+ prometheus.CounterValue,
+ data.W3SVCW3WPRequestErrors403,
+ name,
+ pid,
+ "403",
+ )
+ ch <- prometheus.MustNewConstMetric(
+ c.w3SVCW3WPRequestErrorsTotal,
+ prometheus.CounterValue,
+ data.W3SVCW3WPRequestErrors404,
+ name,
+ pid,
+ "404",
+ )
+ ch <- prometheus.MustNewConstMetric(
+ c.w3SVCW3WPRequestErrorsTotal,
+ prometheus.CounterValue,
+ data.W3SVCW3WPRequestErrors500,
+ name,
+ pid,
+ "500",
+ )
+ ch <- prometheus.MustNewConstMetric(
+ c.w3SVCW3WPWebSocketRequestsActive,
+ prometheus.CounterValue,
+ data.W3SVCW3WPWebSocketRequestsActive,
+ name,
+ pid,
+ )
+ ch <- prometheus.MustNewConstMetric(
+ c.w3SVCW3WPWebSocketConnectionAttempts,
+ prometheus.CounterValue,
+ data.W3SVCW3WPWebSocketConnectionAttempts,
+ name,
+ pid,
+ )
+ ch <- prometheus.MustNewConstMetric(
+ c.w3SVCW3WPWebSocketConnectionsAccepted,
+ prometheus.CounterValue,
+ data.W3SVCW3WPWebSocketConnectionsAccepted,
+ name,
+ pid,
+ )
+ ch <- prometheus.MustNewConstMetric(
+ c.w3SVCW3WPWebSocketConnectionsRejected,
+ prometheus.CounterValue,
+ data.W3SVCW3WPWebSocketConnectionsRejected,
+ name,
+ pid,
+ )
+ }
+
+ return nil
+}
+
+func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
+ err := c.w3SVCW3WPPerfDataCollector.Collect(&c.perfDataObjectW3SVCW3WP)
+ if err != nil {
+		return fmt.Errorf("failed to collect W3SVC_W3WP metrics: %w", err)
+ }
+
+ deduplicateIISNames(c.perfDataObjectW3SVCW3WP)
+
+ for _, data := range c.perfDataObjectW3SVCW3WP {
+ if c.config.AppExclude.MatchString(data.Name) || !c.config.AppInclude.MatchString(data.Name) {
+ continue
+ }
+
+		// Extract the apppool name from the format <PID>_<NAME>
+ pid := workerProcessNameExtractor.ReplaceAllString(data.Name, "$1")
+
+ name := workerProcessNameExtractor.ReplaceAllString(data.Name, "$2")
if name == "" || c.config.AppExclude.MatchString(name) ||
!c.config.AppInclude.MatchString(name) {
continue
@@ -444,7 +526,7 @@ func (c *Collector) collectW3SVCW3WP(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPThreads,
prometheus.GaugeValue,
- app[w3SVCW3WPThreads].FirstValue,
+ data.W3SVCW3WPThreads,
name,
pid,
"busy",
@@ -452,283 +534,220 @@ func (c *Collector) collectW3SVCW3WP(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMaximumThreads,
prometheus.CounterValue,
- app[w3SVCW3WPMaximumThreads].FirstValue,
+ data.W3SVCW3WPMaximumThreads,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPRequestsTotal,
prometheus.CounterValue,
- app[w3SVCW3WPRequestsTotal].FirstValue,
+ data.W3SVCW3WPRequestsTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPRequestsActive,
prometheus.CounterValue,
- app[w3SVCW3WPRequestsActive].FirstValue,
+ data.W3SVCW3WPRequestsActive,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPActiveFlushedEntries,
prometheus.GaugeValue,
- app[w3SVCW3WPActiveFlushedEntries].FirstValue,
+ data.W3SVCW3WPActiveFlushedEntries,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPCurrentFileCacheMemoryUsage,
prometheus.GaugeValue,
- app[w3SVCW3WPCurrentFileCacheMemoryUsage].FirstValue,
+ data.W3SVCW3WPCurrentFileCacheMemoryUsage,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMaximumFileCacheMemoryUsage,
prometheus.CounterValue,
- app[w3SVCW3WPMaximumFileCacheMemoryUsage].FirstValue,
+ data.W3SVCW3WPMaximumFileCacheMemoryUsage,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFileCacheFlushesTotal,
prometheus.CounterValue,
- app[w3SVCW3WPFileCacheFlushesTotal].FirstValue,
+ data.W3SVCW3WPFileCacheFlushesTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFileCacheQueriesTotal,
prometheus.CounterValue,
- app[w3SVCW3WPFileCacheHitsTotal].FirstValue+app[w3SVCW3WPFileCacheMissesTotal].FirstValue,
+ data.W3SVCW3WPFileCacheHitsTotal+data.W3SVCW3WPFileCacheMissesTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFileCacheHitsTotal,
prometheus.CounterValue,
- app[w3SVCW3WPFileCacheHitsTotal].FirstValue,
+ data.W3SVCW3WPFileCacheHitsTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFilesCached,
prometheus.GaugeValue,
- app[w3SVCW3WPFilesCached].FirstValue,
+ data.W3SVCW3WPFilesCached,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFilesCachedTotal,
prometheus.CounterValue,
- app[w3SVCW3WPFilesCachedTotal].FirstValue,
+ data.W3SVCW3WPFilesCachedTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFilesFlushedTotal,
prometheus.CounterValue,
- app[w3SVCW3WPFilesFlushedTotal].FirstValue,
+ data.W3SVCW3WPFilesFlushedTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURICacheFlushesTotal,
prometheus.CounterValue,
- app[w3SVCW3WPURICacheFlushesTotal].FirstValue,
+ data.W3SVCW3WPURICacheFlushesTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURICacheQueriesTotal,
prometheus.CounterValue,
- app[w3SVCW3WPURICacheHitsTotal].FirstValue+app[w3SVCW3WPURICacheMissesTotal].FirstValue,
+ data.W3SVCW3WPURICacheHitsTotal+data.W3SVCW3WPURICacheMissesTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURICacheHitsTotal,
prometheus.CounterValue,
- app[w3SVCW3WPURICacheHitsTotal].FirstValue,
+ data.W3SVCW3WPURICacheHitsTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURIsCached,
prometheus.GaugeValue,
- app[w3SVCW3WPURIsCached].FirstValue,
+ data.W3SVCW3WPURIsCached,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURIsCachedTotal,
prometheus.CounterValue,
- app[w3SVCW3WPURIsCachedTotal].FirstValue,
+ data.W3SVCW3WPURIsCachedTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURIsFlushedTotal,
prometheus.CounterValue,
- app[w3SVCW3WPURIsFlushedTotal].FirstValue,
+ data.W3SVCW3WPURIsFlushedTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCached,
prometheus.GaugeValue,
- app[w3SVCW3WPMetadataCached].FirstValue,
+ data.W3SVCW3WPMetadataCached,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCacheFlushes,
prometheus.CounterValue,
- app[w3SVCW3WPMetadataCacheFlushes].FirstValue,
+ data.W3SVCW3WPMetadataCacheFlushes,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCacheQueriesTotal,
prometheus.CounterValue,
- app[w3SVCW3WPMetaDataCacheHits].FirstValue+app[w3SVCW3WPMetaDataCacheMisses].FirstValue,
+ data.W3SVCW3WPMetaDataCacheHits+data.W3SVCW3WPMetaDataCacheMisses,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCacheHitsTotal,
prometheus.CounterValue,
- app[w3SVCW3WPMetaDataCacheHits].FirstValue,
+ data.W3SVCW3WPMetaDataCacheHits,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCachedTotal,
prometheus.CounterValue,
- app[w3SVCW3WPMetadataCachedTotal].FirstValue,
+ data.W3SVCW3WPMetadataCachedTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataFlushedTotal,
prometheus.CounterValue,
- app[w3SVCW3WPMetadataFlushedTotal].FirstValue,
+ data.W3SVCW3WPMetadataFlushedTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheActiveFlushedItems,
prometheus.CounterValue,
- app[w3SVCW3WPOutputCacheActiveFlushedItems].FirstValue,
+ data.W3SVCW3WPOutputCacheActiveFlushedItems,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheItems,
prometheus.CounterValue,
- app[w3SVCW3WPOutputCacheItems].FirstValue,
+ data.W3SVCW3WPOutputCacheItems,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheMemoryUsage,
prometheus.CounterValue,
- app[w3SVCW3WPOutputCacheMemoryUsage].FirstValue,
+ data.W3SVCW3WPOutputCacheMemoryUsage,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheQueriesTotal,
prometheus.CounterValue,
- app[w3SVCW3WPOutputCacheHitsTotal].FirstValue+app[w3SVCW3WPOutputCacheMissesTotal].FirstValue,
+ data.W3SVCW3WPOutputCacheHitsTotal+data.W3SVCW3WPOutputCacheMissesTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheHitsTotal,
prometheus.CounterValue,
- app[w3SVCW3WPOutputCacheHitsTotal].FirstValue,
+ data.W3SVCW3WPOutputCacheHitsTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheFlushedItemsTotal,
prometheus.CounterValue,
- app[w3SVCW3WPOutputCacheFlushedItemsTotal].FirstValue,
+ data.W3SVCW3WPOutputCacheFlushedItemsTotal,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheFlushesTotal,
prometheus.CounterValue,
- app[w3SVCW3WPOutputCacheFlushesTotal].FirstValue,
- name,
- pid,
- )
-
- if c.iisVersion.major >= 8 {
- ch <- prometheus.MustNewConstMetric(
- c.w3SVCW3WPRequestErrorsTotal,
- prometheus.CounterValue,
- app[w3SVCW3WPRequestErrors401].FirstValue,
- name,
- pid,
- "401",
- )
- ch <- prometheus.MustNewConstMetric(
- c.w3SVCW3WPRequestErrorsTotal,
- prometheus.CounterValue,
- app[w3SVCW3WPRequestErrors403].FirstValue,
- name,
- pid,
- "403",
- )
- ch <- prometheus.MustNewConstMetric(
- c.w3SVCW3WPRequestErrorsTotal,
- prometheus.CounterValue,
- app[w3SVCW3WPRequestErrors404].FirstValue,
- name,
- pid,
- "404",
- )
- ch <- prometheus.MustNewConstMetric(
- c.w3SVCW3WPRequestErrorsTotal,
- prometheus.CounterValue,
- app[w3SVCW3WPRequestErrors500].FirstValue,
- name,
- pid,
- "500",
- )
- ch <- prometheus.MustNewConstMetric(
- c.w3SVCW3WPWebSocketRequestsActive,
- prometheus.CounterValue,
- app[w3SVCW3WPWebSocketRequestsActive].FirstValue,
- name,
- pid,
- )
- ch <- prometheus.MustNewConstMetric(
- c.w3SVCW3WPWebSocketConnectionAttempts,
- prometheus.CounterValue,
- app[w3SVCW3WPWebSocketConnectionAttempts].FirstValue,
- name,
- pid,
- )
- ch <- prometheus.MustNewConstMetric(
- c.w3SVCW3WPWebSocketConnectionsAccepted,
- prometheus.CounterValue,
- app[w3SVCW3WPWebSocketConnectionsAccepted].FirstValue,
- name,
- pid,
- )
- ch <- prometheus.MustNewConstMetric(
- c.w3SVCW3WPWebSocketConnectionsRejected,
- prometheus.CounterValue,
- app[w3SVCW3WPWebSocketConnectionsRejected].FirstValue,
- name,
- pid,
- )
- }
+ data.W3SVCW3WPOutputCacheFlushesTotal,
+ name,
+ pid,
+ )
}
return nil
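
As a concrete illustration of the instance parsing used in both collect paths above: W3SVC_W3WP instance names have the form <PID>_<apppool name>, and workerProcessNameExtractor (declared earlier in this file as ^(\d+)_(.+)$) splits them into the pid and app-pool labels. The sample instance name below is made up.

func exampleSplitInstance() (pid, name string) {
	instance := "4711_DefaultAppPool"

	pid = workerProcessNameExtractor.ReplaceAllString(instance, "$1")  // "4711"
	name = workerProcessNameExtractor.ReplaceAllString(instance, "$2") // "DefaultAppPool"

	return pid, name
}
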
diff --git a/internal/collector/iis/iis_web_service.go b/internal/collector/iis/iis_web_service.go
index dcef8eede..001424888 100644
--- a/internal/collector/iis/iis_web_service.go
+++ b/internal/collector/iis/iis_web_service.go
@@ -18,13 +18,14 @@ package iis
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorWebService struct {
- perfDataCollectorWebService *perfdata.Collector
+ perfDataCollectorWebService *pdh.Collector
+ perfDataObjectWebService []perfDataCounterValuesWebService
webServiceCurrentAnonymousUsers *prometheus.Desc
webServiceCurrentBlockedAsyncIORequests *prometheus.Desc
@@ -50,88 +51,56 @@ type collectorWebService struct {
webServiceTotalRejectedAsyncIORequests *prometheus.Desc
}
-const (
- webServiceCurrentAnonymousUsers = "Current Anonymous Users"
- webServiceCurrentBlockedAsyncIORequests = "Current Blocked Async I/O Requests"
- webServiceCurrentCGIRequests = "Current CGI Requests"
- webServiceCurrentConnections = "Current Connections"
- webServiceCurrentISAPIExtensionRequests = "Current ISAPI Extension Requests"
- webServiceCurrentNonAnonymousUsers = "Current NonAnonymous Users"
- webServiceServiceUptime = "Service Uptime"
- webServiceTotalBytesReceived = "Total Bytes Received"
- webServiceTotalBytesSent = "Total Bytes Sent"
- webServiceTotalAnonymousUsers = "Total Anonymous Users"
- webServiceTotalBlockedAsyncIORequests = "Total Blocked Async I/O Requests"
- webServiceTotalCGIRequests = "Total CGI Requests"
- webServiceTotalConnectionAttemptsAllInstances = "Total Connection Attempts (all instances)"
- webServiceTotalFilesReceived = "Total Files Received"
- webServiceTotalFilesSent = "Total Files Sent"
- webServiceTotalISAPIExtensionRequests = "Total ISAPI Extension Requests"
- webServiceTotalLockedErrors = "Total Locked Errors"
- webServiceTotalLogonAttempts = "Total Logon Attempts"
- webServiceTotalNonAnonymousUsers = "Total NonAnonymous Users"
- webServiceTotalNotFoundErrors = "Total Not Found Errors"
- webServiceTotalRejectedAsyncIORequests = "Total Rejected Async I/O Requests"
- webServiceTotalCopyRequests = "Total Copy Requests"
- webServiceTotalDeleteRequests = "Total Delete Requests"
- webServiceTotalGetRequests = "Total Get Requests"
- webServiceTotalHeadRequests = "Total Head Requests"
- webServiceTotalLockRequests = "Total Lock Requests"
- webServiceTotalMkcolRequests = "Total Mkcol Requests"
- webServiceTotalMoveRequests = "Total Move Requests"
- webServiceTotalOptionsRequests = "Total Options Requests"
- webServiceTotalOtherRequests = "Total Other Request Methods"
- webServiceTotalPostRequests = "Total Post Requests"
- webServiceTotalPropfindRequests = "Total Propfind Requests"
- webServiceTotalProppatchRequests = "Total Proppatch Requests"
- webServiceTotalPutRequests = "Total Put Requests"
- webServiceTotalSearchRequests = "Total Search Requests"
- webServiceTotalTraceRequests = "Total Trace Requests"
- webServiceTotalUnlockRequests = "Total Unlock Requests"
-)
+type perfDataCounterValuesWebService struct {
+ Name string
+
+ WebServiceCurrentAnonymousUsers float64 `perfdata:"Current Anonymous Users"`
+ WebServiceCurrentBlockedAsyncIORequests float64 `perfdata:"Current Blocked Async I/O Requests"`
+ WebServiceCurrentCGIRequests float64 `perfdata:"Current CGI Requests"`
+ WebServiceCurrentConnections float64 `perfdata:"Current Connections"`
+ WebServiceCurrentISAPIExtensionRequests float64 `perfdata:"Current ISAPI Extension Requests"`
+ WebServiceCurrentNonAnonymousUsers float64 `perfdata:"Current NonAnonymous Users"`
+ WebServiceServiceUptime float64 `perfdata:"Service Uptime"`
+ WebServiceTotalBytesReceived float64 `perfdata:"Total Bytes Received"`
+ WebServiceTotalBytesSent float64 `perfdata:"Total Bytes Sent"`
+ WebServiceTotalAnonymousUsers float64 `perfdata:"Total Anonymous Users"`
+ WebServiceTotalBlockedAsyncIORequests float64 `perfdata:"Total Blocked Async I/O Requests"`
+ WebServiceTotalCGIRequests float64 `perfdata:"Total CGI Requests"`
+ WebServiceTotalConnectionAttemptsAllInstances float64 `perfdata:"Total Connection Attempts (all instances)"`
+ WebServiceTotalFilesReceived float64 `perfdata:"Total Files Received"`
+ WebServiceTotalFilesSent float64 `perfdata:"Total Files Sent"`
+ WebServiceTotalISAPIExtensionRequests float64 `perfdata:"Total ISAPI Extension Requests"`
+ WebServiceTotalLockedErrors float64 `perfdata:"Total Locked Errors"`
+ WebServiceTotalLogonAttempts float64 `perfdata:"Total Logon Attempts"`
+ WebServiceTotalNonAnonymousUsers float64 `perfdata:"Total NonAnonymous Users"`
+ WebServiceTotalNotFoundErrors float64 `perfdata:"Total Not Found Errors"`
+ WebServiceTotalRejectedAsyncIORequests float64 `perfdata:"Total Rejected Async I/O Requests"`
+ WebServiceTotalCopyRequests float64 `perfdata:"Total Copy Requests"`
+ WebServiceTotalDeleteRequests float64 `perfdata:"Total Delete Requests"`
+ WebServiceTotalGetRequests float64 `perfdata:"Total Get Requests"`
+ WebServiceTotalHeadRequests float64 `perfdata:"Total Head Requests"`
+ WebServiceTotalLockRequests float64 `perfdata:"Total Lock Requests"`
+ WebServiceTotalMkcolRequests float64 `perfdata:"Total Mkcol Requests"`
+ WebServiceTotalMoveRequests float64 `perfdata:"Total Move Requests"`
+ WebServiceTotalOptionsRequests float64 `perfdata:"Total Options Requests"`
+ WebServiceTotalOtherRequests float64 `perfdata:"Total Other Request Methods"`
+ WebServiceTotalPostRequests float64 `perfdata:"Total Post Requests"`
+ WebServiceTotalPropfindRequests float64 `perfdata:"Total Propfind Requests"`
+ WebServiceTotalProppatchRequests float64 `perfdata:"Total Proppatch Requests"`
+ WebServiceTotalPutRequests float64 `perfdata:"Total Put Requests"`
+ WebServiceTotalSearchRequests float64 `perfdata:"Total Search Requests"`
+ WebServiceTotalTraceRequests float64 `perfdata:"Total Trace Requests"`
+ WebServiceTotalUnlockRequests float64 `perfdata:"Total Unlock Requests"`
+}
+
+func (p perfDataCounterValuesWebService) GetName() string {
+ return p.Name
+}
func (c *Collector) buildWebService() error {
var err error
- c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service", perfdata.InstancesAll, []string{
- webServiceCurrentAnonymousUsers,
- webServiceCurrentBlockedAsyncIORequests,
- webServiceCurrentCGIRequests,
- webServiceCurrentConnections,
- webServiceCurrentISAPIExtensionRequests,
- webServiceCurrentNonAnonymousUsers,
- webServiceServiceUptime,
- webServiceTotalBytesReceived,
- webServiceTotalBytesSent,
- webServiceTotalAnonymousUsers,
- webServiceTotalBlockedAsyncIORequests,
- webServiceTotalCGIRequests,
- webServiceTotalConnectionAttemptsAllInstances,
- webServiceTotalFilesReceived,
- webServiceTotalFilesSent,
- webServiceTotalISAPIExtensionRequests,
- webServiceTotalLockedErrors,
- webServiceTotalLogonAttempts,
- webServiceTotalNonAnonymousUsers,
- webServiceTotalNotFoundErrors,
- webServiceTotalRejectedAsyncIORequests,
- webServiceTotalCopyRequests,
- webServiceTotalDeleteRequests,
- webServiceTotalGetRequests,
- webServiceTotalHeadRequests,
- webServiceTotalLockRequests,
- webServiceTotalMkcolRequests,
- webServiceTotalMoveRequests,
- webServiceTotalOptionsRequests,
- webServiceTotalOtherRequests,
- webServiceTotalPostRequests,
- webServiceTotalPropfindRequests,
- webServiceTotalProppatchRequests,
- webServiceTotalPutRequests,
- webServiceTotalSearchRequests,
- webServiceTotalTraceRequests,
- webServiceTotalUnlockRequests,
- })
+ c.perfDataCollectorWebService, err = pdh.NewCollector[perfDataCounterValuesWebService]("Web Service", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Web Service collector: %w", err)
}
@@ -273,254 +242,254 @@ func (c *Collector) buildWebService() error {
}
func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorWebService.Collect()
+ err := c.perfDataCollectorWebService.Collect(&c.perfDataObjectWebService)
if err != nil {
return fmt.Errorf("failed to collect Web Service metrics: %w", err)
}
- deduplicateIISNames(perfData)
+ deduplicateIISNames(c.perfDataObjectWebService)
- for name, app := range perfData {
- if c.config.SiteExclude.MatchString(name) || !c.config.SiteInclude.MatchString(name) {
+ for _, data := range c.perfDataObjectWebService {
+ if c.config.SiteExclude.MatchString(data.Name) || !c.config.SiteInclude.MatchString(data.Name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentAnonymousUsers,
prometheus.GaugeValue,
- app[webServiceCurrentAnonymousUsers].FirstValue,
- name,
+ data.WebServiceCurrentAnonymousUsers,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentBlockedAsyncIORequests,
prometheus.GaugeValue,
- app[webServiceCurrentBlockedAsyncIORequests].FirstValue,
- name,
+ data.WebServiceCurrentBlockedAsyncIORequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentCGIRequests,
prometheus.GaugeValue,
- app[webServiceCurrentCGIRequests].FirstValue,
- name,
+ data.WebServiceCurrentCGIRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentConnections,
prometheus.GaugeValue,
- app[webServiceCurrentConnections].FirstValue,
- name,
+ data.WebServiceCurrentConnections,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentISAPIExtensionRequests,
prometheus.GaugeValue,
- app[webServiceCurrentISAPIExtensionRequests].FirstValue,
- name,
+ data.WebServiceCurrentISAPIExtensionRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentNonAnonymousUsers,
prometheus.GaugeValue,
- app[webServiceCurrentNonAnonymousUsers].FirstValue,
- name,
+ data.WebServiceCurrentNonAnonymousUsers,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceServiceUptime,
prometheus.GaugeValue,
- app[webServiceServiceUptime].FirstValue,
- name,
+ data.WebServiceServiceUptime,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalBytesReceived,
prometheus.CounterValue,
- app[webServiceTotalBytesReceived].FirstValue,
- name,
+ data.WebServiceTotalBytesReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalBytesSent,
prometheus.CounterValue,
- app[webServiceTotalBytesSent].FirstValue,
- name,
+ data.WebServiceTotalBytesSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalAnonymousUsers,
prometheus.CounterValue,
- app[webServiceTotalAnonymousUsers].FirstValue,
- name,
+ data.WebServiceTotalAnonymousUsers,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalBlockedAsyncIORequests,
prometheus.CounterValue,
- app[webServiceTotalBlockedAsyncIORequests].FirstValue,
- name,
+ data.WebServiceTotalBlockedAsyncIORequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalCGIRequests,
prometheus.CounterValue,
- app[webServiceTotalCGIRequests].FirstValue,
- name,
+ data.WebServiceTotalCGIRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalConnectionAttemptsAllInstances,
prometheus.CounterValue,
- app[webServiceTotalConnectionAttemptsAllInstances].FirstValue,
- name,
+ data.WebServiceTotalConnectionAttemptsAllInstances,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalFilesReceived,
prometheus.CounterValue,
- app[webServiceTotalFilesReceived].FirstValue,
- name,
+ data.WebServiceTotalFilesReceived,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalFilesSent,
prometheus.CounterValue,
- app[webServiceTotalFilesSent].FirstValue,
- name,
+ data.WebServiceTotalFilesSent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalISAPIExtensionRequests,
prometheus.CounterValue,
- app[webServiceTotalISAPIExtensionRequests].FirstValue,
- name,
+ data.WebServiceTotalISAPIExtensionRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalLockedErrors,
prometheus.CounterValue,
- app[webServiceTotalLockedErrors].FirstValue,
- name,
+ data.WebServiceTotalLockedErrors,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalLogonAttempts,
prometheus.CounterValue,
- app[webServiceTotalLogonAttempts].FirstValue,
- name,
+ data.WebServiceTotalLogonAttempts,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalNonAnonymousUsers,
prometheus.CounterValue,
- app[webServiceTotalNonAnonymousUsers].FirstValue,
- name,
+ data.WebServiceTotalNonAnonymousUsers,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalNotFoundErrors,
prometheus.CounterValue,
- app[webServiceTotalNotFoundErrors].FirstValue,
- name,
+ data.WebServiceTotalNotFoundErrors,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRejectedAsyncIORequests,
prometheus.CounterValue,
- app[webServiceTotalRejectedAsyncIORequests].FirstValue,
- name,
+ data.WebServiceTotalRejectedAsyncIORequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalOtherRequests].FirstValue,
- name,
+ data.WebServiceTotalOtherRequests,
+ data.Name,
"other",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalCopyRequests].FirstValue,
- name,
+ data.WebServiceTotalCopyRequests,
+ data.Name,
"COPY",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalDeleteRequests].FirstValue,
- name,
+ data.WebServiceTotalDeleteRequests,
+ data.Name,
"DELETE",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalGetRequests].FirstValue,
- name,
+ data.WebServiceTotalGetRequests,
+ data.Name,
"GET",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalHeadRequests].FirstValue,
- name,
+ data.WebServiceTotalHeadRequests,
+ data.Name,
"HEAD",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalLockRequests].FirstValue,
- name,
+ data.WebServiceTotalLockRequests,
+ data.Name,
"LOCK",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalMkcolRequests].FirstValue,
- name,
+ data.WebServiceTotalMkcolRequests,
+ data.Name,
"MKCOL",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalMoveRequests].FirstValue,
- name,
+ data.WebServiceTotalMoveRequests,
+ data.Name,
"MOVE",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalOptionsRequests].FirstValue,
- name,
+ data.WebServiceTotalOptionsRequests,
+ data.Name,
"OPTIONS",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalPostRequests].FirstValue,
- name,
+ data.WebServiceTotalPostRequests,
+ data.Name,
"POST",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalPropfindRequests].FirstValue,
- name,
+ data.WebServiceTotalPropfindRequests,
+ data.Name,
"PROPFIND",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalProppatchRequests].FirstValue,
- name,
+ data.WebServiceTotalProppatchRequests,
+ data.Name,
"PROPPATCH",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalPutRequests].FirstValue,
- name,
+ data.WebServiceTotalPutRequests,
+ data.Name,
"PUT",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalSearchRequests].FirstValue,
- name,
+ data.WebServiceTotalSearchRequests,
+ data.Name,
"SEARCH",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalTraceRequests].FirstValue,
- name,
+ data.WebServiceTotalTraceRequests,
+ data.Name,
"TRACE",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
- app[webServiceTotalUnlockRequests].FirstValue,
- name,
+ data.WebServiceTotalUnlockRequests,
+ data.Name,
"UNLOCK",
)
}
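
For reference, the HTTP verb counters above collapse into a single metric family keyed by a "method" label. The sketch below is hypothetical and only restates a subset of what collectWebService emits for one instance.

func exampleRequestSeries(data perfDataCounterValuesWebService) map[string]float64 {
	// Method label value -> counter value for that verb.
	return map[string]float64{
		"GET":    data.WebServiceTotalGetRequests,
		"POST":   data.WebServiceTotalPostRequests,
		"DELETE": data.WebServiceTotalDeleteRequests,
		"other":  data.WebServiceTotalOtherRequests,
	}
}
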
diff --git a/internal/collector/iis/iis_web_service_cache.go b/internal/collector/iis/iis_web_service_cache.go
index cd476659c..08a916109 100644
--- a/internal/collector/iis/iis_web_service_cache.go
+++ b/internal/collector/iis/iis_web_service_cache.go
@@ -18,13 +18,14 @@ package iis
import (
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorWebServiceCache struct {
- serviceCachePerfDataCollector *perfdata.Collector
+ serviceCachePerfDataCollector *pdh.Collector
+ perfDataObjectServiceCache []perfDataCounterServiceCache
serviceCacheActiveFlushedEntries *prometheus.Desc
@@ -60,82 +61,53 @@ type collectorWebServiceCache struct {
serviceCacheOutputCacheFlushesTotal *prometheus.Desc
}
-const (
- serviceCacheActiveFlushedEntries = "Active Flushed Entries"
- serviceCacheCurrentFileCacheMemoryUsage = "Current File Cache Memory Usage"
- serviceCacheMaximumFileCacheMemoryUsage = "Maximum File Cache Memory Usage"
- serviceCacheFileCacheFlushesTotal = "File Cache Flushes"
- serviceCacheFileCacheHitsTotal = "File Cache Hits"
- serviceCacheFileCacheMissesTotal = "File Cache Misses"
- serviceCacheFilesCached = "Current Files Cached"
- serviceCacheFilesCachedTotal = "Total Files Cached"
- serviceCacheFilesFlushedTotal = "Total Flushed Files"
- serviceCacheURICacheFlushesTotal = "Total Flushed URIs"
- serviceCacheURICacheFlushesTotalKernel = "Total Flushed URIs"
- serviceCacheURIsFlushedTotalKernel = "Kernel: Total Flushed URIs"
- serviceCacheURICacheHitsTotal = "URI Cache Hits"
- serviceCacheURICacheHitsTotalKernel = "Kernel: URI Cache Hits"
- serviceCacheURICacheMissesTotal = "URI Cache Misses"
- serviceCacheURICacheMissesTotalKernel = "Kernel: URI Cache Misses"
- serviceCacheURIsCached = "Current URIs Cached"
- serviceCacheURIsCachedKernel = "Kernel: Current URIs Cached"
- serviceCacheURIsCachedTotal = "Total URIs Cached"
- serviceCacheURIsCachedTotalKernel = "Total URIs Cached"
- serviceCacheURIsFlushedTotal = "Total Flushed URIs"
- serviceCacheMetaDataCacheHits = "Metadata Cache Hits"
- serviceCacheMetaDataCacheMisses = "Metadata Cache Misses"
- serviceCacheMetadataCached = "Current Metadata Cached"
- serviceCacheMetadataCacheFlushes = "Metadata Cache Flushes"
- serviceCacheMetadataCachedTotal = "Total Metadata Cached"
- serviceCacheMetadataFlushedTotal = "Total Flushed Metadata"
- serviceCacheOutputCacheActiveFlushedItems = "Output Cache Current Flushed Items"
- serviceCacheOutputCacheItems = "Output Cache Current Items"
- serviceCacheOutputCacheMemoryUsage = "Output Cache Current Memory Usage"
- serviceCacheOutputCacheHitsTotal = "Output Cache Total Hits"
- serviceCacheOutputCacheMissesTotal = "Output Cache Total Misses"
- serviceCacheOutputCacheFlushedItemsTotal = "Output Cache Total Flushed Items"
- serviceCacheOutputCacheFlushesTotal = "Output Cache Total Flushes"
-)
+type perfDataCounterServiceCache struct {
+ Name string
+
+ ServiceCacheActiveFlushedEntries float64 `perfdata:"Active Flushed Entries"`
+ ServiceCacheCurrentFileCacheMemoryUsage float64 `perfdata:"Current File Cache Memory Usage"`
+ ServiceCacheMaximumFileCacheMemoryUsage float64 `perfdata:"Maximum File Cache Memory Usage"`
+ ServiceCacheFileCacheFlushesTotal float64 `perfdata:"File Cache Flushes"`
+ ServiceCacheFileCacheHitsTotal float64 `perfdata:"File Cache Hits"`
+ ServiceCacheFileCacheMissesTotal float64 `perfdata:"File Cache Misses"`
+ ServiceCacheFilesCached float64 `perfdata:"Current Files Cached"`
+ ServiceCacheFilesCachedTotal float64 `perfdata:"Total Files Cached"`
+ ServiceCacheFilesFlushedTotal float64 `perfdata:"Total Flushed Files"`
+ ServiceCacheURICacheFlushesTotal float64 `perfdata:"Total Flushed URIs"`
+ ServiceCacheURICacheFlushesTotalKernel float64 `perfdata:"Total Flushed URIs"`
+ ServiceCacheURIsFlushedTotalKernel float64 `perfdata:"Kernel: Total Flushed URIs"`
+ ServiceCacheURICacheHitsTotal float64 `perfdata:"URI Cache Hits"`
+ ServiceCacheURICacheHitsTotalKernel float64 `perfdata:"Kernel: URI Cache Hits"`
+ ServiceCacheURICacheMissesTotal float64 `perfdata:"URI Cache Misses"`
+ ServiceCacheURICacheMissesTotalKernel float64 `perfdata:"Kernel: URI Cache Misses"`
+ ServiceCacheURIsCached float64 `perfdata:"Current URIs Cached"`
+ ServiceCacheURIsCachedKernel float64 `perfdata:"Kernel: Current URIs Cached"`
+ ServiceCacheURIsCachedTotal float64 `perfdata:"Total URIs Cached"`
+ ServiceCacheURIsCachedTotalKernel float64 `perfdata:"Total URIs Cached"`
+ ServiceCacheURIsFlushedTotal float64 `perfdata:"Total Flushed URIs"`
+ ServiceCacheMetaDataCacheHits float64 `perfdata:"Metadata Cache Hits"`
+ ServiceCacheMetaDataCacheMisses float64 `perfdata:"Metadata Cache Misses"`
+ ServiceCacheMetadataCached float64 `perfdata:"Current Metadata Cached"`
+ ServiceCacheMetadataCacheFlushes float64 `perfdata:"Metadata Cache Flushes"`
+ ServiceCacheMetadataCachedTotal float64 `perfdata:"Total Metadata Cached"`
+ ServiceCacheMetadataFlushedTotal float64 `perfdata:"Total Flushed Metadata"`
+ ServiceCacheOutputCacheActiveFlushedItems float64 `perfdata:"Output Cache Current Flushed Items"`
+ ServiceCacheOutputCacheItems float64 `perfdata:"Output Cache Current Items"`
+ ServiceCacheOutputCacheMemoryUsage float64 `perfdata:"Output Cache Current Memory Usage"`
+ ServiceCacheOutputCacheHitsTotal float64 `perfdata:"Output Cache Total Hits"`
+ ServiceCacheOutputCacheMissesTotal float64 `perfdata:"Output Cache Total Misses"`
+ ServiceCacheOutputCacheFlushedItemsTotal float64 `perfdata:"Output Cache Total Flushed Items"`
+ ServiceCacheOutputCacheFlushesTotal float64 `perfdata:"Output Cache Total Flushes"`
+}
+
+func (p perfDataCounterServiceCache) GetName() string {
+ return p.Name
+}
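The typed collector drops the hand-maintained counter-name list because the names now live in perfdata struct tags. As a rough, hypothetical sketch of the idea (the real internal/pdh package is not shown in this diff and may handle options and instance names differently), the counter list can be recovered from the tags by reflection:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Illustrative row type mirroring the shape used in this change.
type row struct {
	Name string

	FileCacheHits   float64 `perfdata:"File Cache Hits"`
	FileCacheMisses float64 `perfdata:"File Cache Misses"`
	PercentFree     float64 `perfdata:"% Free Space,secondvalue"`
}

// countersFromTags lists every counter named by a perfdata tag on T,
// stripping options such as ",secondvalue".
func countersFromTags[T any]() []string {
	t := reflect.TypeOf(*new(T))
	counters := make([]string, 0, t.NumField())

	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("perfdata")
		if tag == "" {
			continue // e.g. Name carries the instance, not a counter
		}

		counters = append(counters, strings.Split(tag, ",")[0])
	}

	return counters
}

func main() {
	fmt.Println(countersFromTags[row]())
	// Output: [File Cache Hits File Cache Misses % Free Space]
}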
func (c *Collector) buildWebServiceCache() error {
var err error
- c.serviceCachePerfDataCollector, err = perfdata.NewCollector("Web Service Cache", perfdata.InstancesAll, []string{
- serviceCacheActiveFlushedEntries,
- serviceCacheCurrentFileCacheMemoryUsage,
- serviceCacheMaximumFileCacheMemoryUsage,
- serviceCacheFileCacheFlushesTotal,
- serviceCacheFileCacheHitsTotal,
- serviceCacheFileCacheMissesTotal,
- serviceCacheFilesCached,
- serviceCacheFilesCachedTotal,
- serviceCacheFilesFlushedTotal,
- serviceCacheURICacheFlushesTotal,
- serviceCacheURICacheFlushesTotalKernel,
- serviceCacheURIsFlushedTotalKernel,
- serviceCacheURICacheHitsTotal,
- serviceCacheURICacheHitsTotalKernel,
- serviceCacheURICacheMissesTotal,
- serviceCacheURICacheMissesTotalKernel,
- serviceCacheURIsCached,
- serviceCacheURIsCachedKernel,
- serviceCacheURIsCachedTotal,
- serviceCacheURIsCachedTotalKernel,
- serviceCacheURIsFlushedTotal,
- serviceCacheMetaDataCacheHits,
- serviceCacheMetaDataCacheMisses,
- serviceCacheMetadataCached,
- serviceCacheMetadataCacheFlushes,
- serviceCacheMetadataCachedTotal,
- serviceCacheMetadataFlushedTotal,
- serviceCacheOutputCacheActiveFlushedItems,
- serviceCacheOutputCacheItems,
- serviceCacheOutputCacheMemoryUsage,
- serviceCacheOutputCacheHitsTotal,
- serviceCacheOutputCacheMissesTotal,
- serviceCacheOutputCacheFlushedItemsTotal,
- serviceCacheOutputCacheFlushesTotal,
- })
+ c.serviceCachePerfDataCollector, err = pdh.NewCollector[perfDataCounterServiceCache]("Web Service Cache", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Web Service Cache collector: %w", err)
}
@@ -314,199 +286,199 @@ func (c *Collector) buildWebServiceCache() error {
}
func (c *Collector) collectWebServiceCache(ch chan<- prometheus.Metric) error {
- perfData, err := c.serviceCachePerfDataCollector.Collect()
+ err := c.serviceCachePerfDataCollector.Collect(&c.perfDataObjectServiceCache)
if err != nil {
return fmt.Errorf("failed to collect Web Service Cache metrics: %w", err)
}
- deduplicateIISNames(perfData)
+ deduplicateIISNames(c.perfDataObjectServiceCache)
- for name, app := range perfData {
- if c.config.SiteExclude.MatchString(name) || !c.config.SiteInclude.MatchString(name) {
+ for _, data := range c.perfDataObjectServiceCache {
+ if c.config.SiteExclude.MatchString(data.Name) || !c.config.SiteInclude.MatchString(data.Name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.serviceCacheActiveFlushedEntries,
prometheus.GaugeValue,
- app[serviceCacheActiveFlushedEntries].FirstValue,
+ data.ServiceCacheActiveFlushedEntries,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheCurrentFileCacheMemoryUsage,
prometheus.GaugeValue,
- app[serviceCacheCurrentFileCacheMemoryUsage].FirstValue,
+ data.ServiceCacheCurrentFileCacheMemoryUsage,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMaximumFileCacheMemoryUsage,
prometheus.CounterValue,
- app[serviceCacheMaximumFileCacheMemoryUsage].FirstValue,
+ data.ServiceCacheMaximumFileCacheMemoryUsage,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheFlushesTotal,
prometheus.CounterValue,
- app[serviceCacheFileCacheFlushesTotal].FirstValue,
+ data.ServiceCacheFileCacheFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheQueriesTotal,
prometheus.CounterValue,
- app[serviceCacheFileCacheHitsTotal].FirstValue+app[serviceCacheFileCacheMissesTotal].FirstValue,
+ data.ServiceCacheFileCacheHitsTotal+data.ServiceCacheFileCacheMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheHitsTotal,
prometheus.CounterValue,
- app[serviceCacheFileCacheHitsTotal].FirstValue,
+ data.ServiceCacheFileCacheHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesCached,
prometheus.GaugeValue,
- app[serviceCacheFilesCached].FirstValue,
+ data.ServiceCacheFilesCached,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesCachedTotal,
prometheus.CounterValue,
- app[serviceCacheFilesCachedTotal].FirstValue,
+ data.ServiceCacheFilesCachedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesFlushedTotal,
prometheus.CounterValue,
- app[serviceCacheFilesFlushedTotal].FirstValue,
+ data.ServiceCacheFilesFlushedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheFlushesTotal,
prometheus.CounterValue,
- app[serviceCacheURICacheFlushesTotal].FirstValue,
+ data.ServiceCacheURICacheFlushesTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheFlushesTotal,
prometheus.CounterValue,
- app[serviceCacheURICacheFlushesTotalKernel].FirstValue,
+ data.ServiceCacheURICacheFlushesTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheQueriesTotal,
prometheus.CounterValue,
- app[serviceCacheURICacheHitsTotal].FirstValue+app[serviceCacheURICacheMissesTotal].FirstValue,
+ data.ServiceCacheURICacheHitsTotal+data.ServiceCacheURICacheMissesTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheQueriesTotal,
prometheus.CounterValue,
- app[serviceCacheURICacheHitsTotalKernel].FirstValue+app[serviceCacheURICacheMissesTotalKernel].FirstValue,
+ data.ServiceCacheURICacheHitsTotalKernel+data.ServiceCacheURICacheMissesTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheHitsTotal,
prometheus.CounterValue,
- app[serviceCacheURICacheHitsTotal].FirstValue,
+ data.ServiceCacheURICacheHitsTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheHitsTotal,
prometheus.CounterValue,
- app[serviceCacheURICacheHitsTotalKernel].FirstValue,
+ data.ServiceCacheURICacheHitsTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCached,
prometheus.GaugeValue,
- app[serviceCacheURIsCached].FirstValue,
+ data.ServiceCacheURIsCached,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCached,
prometheus.GaugeValue,
- app[serviceCacheURIsCachedKernel].FirstValue,
+ data.ServiceCacheURIsCachedKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCachedTotal,
prometheus.CounterValue,
- app[serviceCacheURIsCachedTotal].FirstValue,
+ data.ServiceCacheURIsCachedTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCachedTotal,
prometheus.CounterValue,
- app[serviceCacheURIsCachedTotalKernel].FirstValue,
+ data.ServiceCacheURIsCachedTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsFlushedTotal,
prometheus.CounterValue,
- app[serviceCacheURIsFlushedTotal].FirstValue,
+ data.ServiceCacheURIsFlushedTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsFlushedTotal,
prometheus.CounterValue,
- app[serviceCacheURIsFlushedTotalKernel].FirstValue,
+ data.ServiceCacheURIsFlushedTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCached,
prometheus.GaugeValue,
- app[serviceCacheMetadataCached].FirstValue,
+ data.ServiceCacheMetadataCached,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheFlushes,
prometheus.CounterValue,
- app[serviceCacheMetadataCacheFlushes].FirstValue,
+ data.ServiceCacheMetadataCacheFlushes,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheQueriesTotal,
prometheus.CounterValue,
- app[serviceCacheMetaDataCacheHits].FirstValue+app[serviceCacheMetaDataCacheMisses].FirstValue,
+ data.ServiceCacheMetaDataCacheHits+data.ServiceCacheMetaDataCacheMisses,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheHitsTotal,
prometheus.CounterValue,
- 0, // app[serviceCacheMetadataCacheHitsTotal].FirstValue,
+ 0, // data.ServiceCacheMetadataCacheHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCachedTotal,
prometheus.CounterValue,
- app[serviceCacheMetadataCachedTotal].FirstValue,
+ data.ServiceCacheMetadataCachedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataFlushedTotal,
prometheus.CounterValue,
- app[serviceCacheMetadataFlushedTotal].FirstValue,
+ data.ServiceCacheMetadataFlushedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheActiveFlushedItems,
prometheus.CounterValue,
- app[serviceCacheOutputCacheActiveFlushedItems].FirstValue,
+ data.ServiceCacheOutputCacheActiveFlushedItems,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheItems,
prometheus.CounterValue,
- app[serviceCacheOutputCacheItems].FirstValue,
+ data.ServiceCacheOutputCacheItems,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheMemoryUsage,
prometheus.CounterValue,
- app[serviceCacheOutputCacheMemoryUsage].FirstValue,
+ data.ServiceCacheOutputCacheMemoryUsage,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheQueriesTotal,
prometheus.CounterValue,
- app[serviceCacheOutputCacheHitsTotal].FirstValue+app[serviceCacheOutputCacheMissesTotal].FirstValue,
+ data.ServiceCacheOutputCacheHitsTotal+data.ServiceCacheOutputCacheMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheHitsTotal,
prometheus.CounterValue,
- app[serviceCacheOutputCacheHitsTotal].FirstValue,
+ data.ServiceCacheOutputCacheHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheFlushedItemsTotal,
prometheus.CounterValue,
- app[serviceCacheOutputCacheFlushedItemsTotal].FirstValue,
+ data.ServiceCacheOutputCacheFlushedItemsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheFlushesTotal,
prometheus.CounterValue,
- app[serviceCacheOutputCacheFlushesTotal].FirstValue,
+ data.ServiceCacheOutputCacheFlushesTotal,
)
}
diff --git a/internal/collector/logical_disk/const.go b/internal/collector/logical_disk/const.go
deleted file mode 100644
index 15b71f172..000000000
--- a/internal/collector/logical_disk/const.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package logical_disk
-
-const (
- avgDiskReadQueueLength = "Avg. Disk Read Queue Length"
- avgDiskSecPerRead = "Avg. Disk sec/Read"
- avgDiskSecPerTransfer = "Avg. Disk sec/Transfer"
- avgDiskSecPerWrite = "Avg. Disk sec/Write"
- avgDiskWriteQueueLength = "Avg. Disk Write Queue Length"
- currentDiskQueueLength = "Current Disk Queue Length"
- freeSpace = "Free Megabytes"
- diskReadBytesPerSec = "Disk Read Bytes/sec"
- diskReadsPerSec = "Disk Reads/sec"
- diskWriteBytesPerSec = "Disk Write Bytes/sec"
- diskWritesPerSec = "Disk Writes/sec"
- percentDiskReadTime = "% Disk Read Time"
- percentDiskWriteTime = "% Disk Write Time"
- percentFreeSpace = "% Free Space"
- percentIdleTime = "% Idle Time"
- splitIOPerSec = "Split IO/Sec"
-)
diff --git a/internal/collector/logical_disk/logical_disk.go b/internal/collector/logical_disk/logical_disk.go
index 31dea5f88..f6f0b6a4e 100644
--- a/internal/collector/logical_disk/logical_disk.go
+++ b/internal/collector/logical_disk/logical_disk.go
@@ -27,7 +27,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
@@ -51,7 +51,8 @@ type Collector struct {
config Config
logger *slog.Logger
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
avgReadQueue *prometheus.Desc
avgWriteQueue *prometheus.Desc
@@ -151,24 +152,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("LogicalDisk", perfdata.InstancesAll, []string{
- currentDiskQueueLength,
- avgDiskReadQueueLength,
- avgDiskWriteQueueLength,
- diskReadBytesPerSec,
- diskReadsPerSec,
- diskWriteBytesPerSec,
- diskWritesPerSec,
- percentDiskReadTime,
- percentDiskWriteTime,
- percentFreeSpace,
- freeSpace,
- percentIdleTime,
- splitIOPerSec,
- avgDiskSecPerRead,
- avgDiskSecPerWrite,
- avgDiskSecPerTransfer,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("LogicalDisk", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
@@ -303,12 +287,9 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- var (
- err error
- info volumeInfo
- )
+ var info volumeInfo
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect LogicalDisk metrics: %w", err)
}
@@ -318,14 +299,14 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
return fmt.Errorf("failed to get volumes: %w", err)
}
- for name, volume := range perfData {
- if c.config.VolumeExclude.MatchString(name) || !c.config.VolumeInclude.MatchString(name) {
+ for _, data := range c.perfDataObject {
+ if c.config.VolumeExclude.MatchString(data.Name) || !c.config.VolumeInclude.MatchString(data.Name) {
continue
}
- info, err = getVolumeInfo(volumes, name)
+ info, err = getVolumeInfo(volumes, data.Name)
if err != nil {
- c.logger.Warn("failed to get volume information for "+name,
+ c.logger.Warn("failed to get volume information for "+data.Name,
slog.Any("err", err),
)
}
@@ -336,7 +317,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
1,
info.diskIDs,
info.volumeType,
- name,
+ data.Name,
info.label,
info.filesystem,
info.serialNumber,
@@ -345,113 +326,113 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.requestsQueued,
prometheus.GaugeValue,
- volume[currentDiskQueueLength].FirstValue,
- name,
+ data.CurrentDiskQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.avgReadQueue,
prometheus.GaugeValue,
- volume[avgDiskReadQueueLength].FirstValue*perfdata.TicksToSecondScaleFactor,
- name,
+ data.AvgDiskReadQueueLength*pdh.TicksToSecondScaleFactor,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.avgWriteQueue,
prometheus.GaugeValue,
- volume[avgDiskWriteQueueLength].FirstValue*perfdata.TicksToSecondScaleFactor,
- name,
+ data.AvgDiskWriteQueueLength*pdh.TicksToSecondScaleFactor,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readBytesTotal,
prometheus.CounterValue,
- volume[diskReadBytesPerSec].FirstValue,
- name,
+ data.DiskReadBytesPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readsTotal,
prometheus.CounterValue,
- volume[diskReadsPerSec].FirstValue,
- name,
+ data.DiskReadsPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeBytesTotal,
prometheus.CounterValue,
- volume[diskWriteBytesPerSec].FirstValue,
- name,
+ data.DiskWriteBytesPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writesTotal,
prometheus.CounterValue,
- volume[diskWritesPerSec].FirstValue,
- name,
+ data.DiskWritesPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readTime,
prometheus.CounterValue,
- volume[percentDiskReadTime].FirstValue,
- name,
+ data.PercentDiskReadTime,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeTime,
prometheus.CounterValue,
- volume[percentDiskWriteTime].FirstValue,
- name,
+ data.PercentDiskWriteTime,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.freeSpace,
prometheus.GaugeValue,
- volume[freeSpace].FirstValue*1024*1024,
- name,
+ data.FreeSpace*1024*1024,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalSpace,
prometheus.GaugeValue,
- volume[percentFreeSpace].SecondValue*1024*1024,
- name,
+ data.PercentFreeSpace*1024*1024,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.idleTime,
prometheus.CounterValue,
- volume[percentIdleTime].FirstValue,
- name,
+ data.PercentIdleTime,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.splitIOs,
prometheus.CounterValue,
- volume[splitIOPerSec].FirstValue,
- name,
+ data.SplitIOPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readLatency,
prometheus.CounterValue,
- volume[avgDiskSecPerRead].FirstValue*perfdata.TicksToSecondScaleFactor,
- name,
+ data.AvgDiskSecPerRead*pdh.TicksToSecondScaleFactor,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeLatency,
prometheus.CounterValue,
- volume[avgDiskSecPerWrite].FirstValue*perfdata.TicksToSecondScaleFactor,
- name,
+ data.AvgDiskSecPerWrite*pdh.TicksToSecondScaleFactor,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readWriteLatency,
prometheus.CounterValue,
- volume[avgDiskSecPerTransfer].FirstValue*perfdata.TicksToSecondScaleFactor,
- name,
+ data.AvgDiskSecPerTransfer*pdh.TicksToSecondScaleFactor,
+ data.Name,
)
}
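The sec/Read, sec/Write and sec/Transfer latencies above are scaled by pdh.TicksToSecondScaleFactor. Its value is not part of this diff; assuming it matches the old perfdata constant (1/1e7, i.e. 100 ns ticks to seconds), the conversion looks like this:

package main

import "fmt"

// Assumed value, not taken from this diff: raw timer counters are accumulated
// in 100 ns ticks, so multiplying by 1/1e7 yields seconds.
const ticksToSecondScaleFactor = 1.0 / 1e7

func main() {
	rawAvgDiskSecPerRead := 250_000.0 // ticks reported by PDH for one interval

	fmt.Printf("read latency sample: %.3fs\n",
		rawAvgDiskSecPerRead*ticksToSecondScaleFactor) // 0.025s
}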
@@ -506,7 +487,9 @@ func getVolumeInfo(volumes map[string]string, rootDrive string) (volumeInfo, err
return volumeInfo{}, fmt.Errorf("could not open volume for %s: %w", rootDrive, err)
}
- defer windows.Close(volumeHandle)
+ defer func(fd windows.Handle) {
+ _ = windows.Close(fd)
+ }(volumeHandle)
controlCode := uint32(5636096) // IOCTL_VOLUME_GET_VOLUME_DISK_EXTENTS
volumeDiskExtents := make([]byte, 16*1024)
diff --git a/internal/collector/logical_disk/logical_disk_test.go b/internal/collector/logical_disk/logical_disk_test.go
index a9f3b1a5e..70d2ee74d 100644
--- a/internal/collector/logical_disk/logical_disk_test.go
+++ b/internal/collector/logical_disk/logical_disk_test.go
@@ -27,8 +27,10 @@ import (
func BenchmarkCollector(b *testing.B) {
// Whitelist is not set in testing context (kingpin flags not parsed), causing the Collector to skip all disks.
localVolumeInclude := ".+"
- kingpin.CommandLine.GetArg("collector.logical_disk.volume-include").StringVar(&localVolumeInclude)
- testutils.FuncBenchmarkCollector(b, "logical_disk", logical_disk.NewWithFlags)
+
+ testutils.FuncBenchmarkCollector(b, "logical_disk", logical_disk.NewWithFlags, func(app *kingpin.Application) {
+ app.GetFlag("collector.logical_disk.volume-include").StringVar(&localVolumeInclude)
+ })
}
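The benchmark no longer mutates the global kingpin.CommandLine; the include pattern is re-bound on the per-benchmark Application passed to the helper. A stand-alone illustration of the kingpin pattern involved (the internal testutils helper itself is not reproduced here):

package main

import (
	"fmt"

	"github.com/alecthomas/kingpin/v2"
)

func main() {
	app := kingpin.New("bench", "per-benchmark flag set")

	// In the exporter this flag is registered by the collector's builder; it is
	// declared here only to keep the example self-contained.
	localVolumeInclude := ".+"
	app.Flag("collector.logical_disk.volume-include", "Regexp of volumes to include.").
		Default(".+").StringVar(&localVolumeInclude)

	// The updated benchmark looks the flag up on the application instance
	// instead of touching the global kingpin.CommandLine.
	app.GetFlag("collector.logical_disk.volume-include").StringVar(&localVolumeInclude)

	if _, err := app.Parse([]string{"--collector.logical_disk.volume-include", "C:.*"}); err != nil {
		panic(err)
	}

	fmt.Println(localVolumeInclude) // C:.*
}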
func TestCollector(t *testing.T) {
diff --git a/internal/collector/logical_disk/types.go b/internal/collector/logical_disk/types.go
new file mode 100644
index 000000000..8fd5e3161
--- /dev/null
+++ b/internal/collector/logical_disk/types.go
@@ -0,0 +1,37 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package logical_disk
+
+type perfDataCounterValues struct {
+ Name string
+
+ AvgDiskReadQueueLength float64 `perfdata:"Avg. Disk Read Queue Length"`
+ AvgDiskSecPerRead float64 `perfdata:"Avg. Disk sec/Read"`
+ AvgDiskSecPerTransfer float64 `perfdata:"Avg. Disk sec/Transfer"`
+ AvgDiskSecPerWrite float64 `perfdata:"Avg. Disk sec/Write"`
+ AvgDiskWriteQueueLength float64 `perfdata:"Avg. Disk Write Queue Length"`
+ CurrentDiskQueueLength float64 `perfdata:"Current Disk Queue Length"`
+ FreeSpace float64 `perfdata:"Free Megabytes"`
+ DiskReadBytesPerSec float64 `perfdata:"Disk Read Bytes/sec"`
+ DiskReadsPerSec float64 `perfdata:"Disk Reads/sec"`
+ DiskWriteBytesPerSec float64 `perfdata:"Disk Write Bytes/sec"`
+ DiskWritesPerSec float64 `perfdata:"Disk Writes/sec"`
+ PercentDiskReadTime float64 `perfdata:"% Disk Read Time"`
+ PercentDiskWriteTime float64 `perfdata:"% Disk Write Time"`
+ PercentFreeSpace float64 `perfdata:"% Free Space,secondvalue"`
+ PercentIdleTime float64 `perfdata:"% Idle Time"`
+ SplitIOPerSec float64 `perfdata:"Split IO/Sec"`
+}
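The ",secondvalue" option on PercentFreeSpace is what replaces the old volume[percentFreeSpace].SecondValue lookup: for "% Free Space" the base (second) value is the volume's total size in megabytes, while "Free Megabytes" is the free portion, hence the *1024*1024 conversions in Collect above. A small worked example with illustrative numbers:

package main

import "fmt"

func main() {
	// Values as Collect would see them for one volume (illustrative only).
	freeSpace := 20480.0            // "Free Megabytes" counter
	percentFreeSpaceBase := 81920.0 // base value of "% Free Space": total size in MB

	freeBytes := freeSpace * 1024 * 1024
	totalBytes := percentFreeSpaceBase * 1024 * 1024

	fmt.Printf("free=%.0f bytes, total=%.0f bytes (%.1f%% free)\n",
		freeBytes, totalBytes, 100*freeBytes/totalBytes)
	// free=21474836480 bytes, total=85899345920 bytes (25.0% free)
}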
diff --git a/internal/collector/memory/const.go b/internal/collector/memory/const.go
deleted file mode 100644
index 9951cde74..000000000
--- a/internal/collector/memory/const.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package memory
-
-const (
- availableBytes = "Available Bytes"
- availableKBytes = "Available KBytes"
- availableMBytes = "Available MBytes"
- cacheBytes = "Cache Bytes"
- cacheBytesPeak = "Cache Bytes Peak"
- cacheFaultsPerSec = "Cache Faults/sec"
- commitLimit = "Commit Limit"
- committedBytes = "Committed Bytes"
- demandZeroFaultsPerSec = "Demand Zero Faults/sec"
- freeAndZeroPageListBytes = "Free & Zero Page List Bytes"
- freeSystemPageTableEntries = "Free System Page Table Entries"
- modifiedPageListBytes = "Modified Page List Bytes"
- pageFaultsPerSec = "Page Faults/sec"
- pageReadsPerSec = "Page Reads/sec"
- pagesInputPerSec = "Pages Input/sec"
- pagesOutputPerSec = "Pages Output/sec"
- pagesPerSec = "Pages/sec"
- pageWritesPerSec = "Page Writes/sec"
- poolNonpagedAllocs = "Pool Nonpaged Allocs"
- poolNonpagedBytes = "Pool Nonpaged Bytes"
- poolPagedAllocs = "Pool Paged Allocs"
- poolPagedBytes = "Pool Paged Bytes"
- poolPagedResidentBytes = "Pool Paged Resident Bytes"
- standbyCacheCoreBytes = "Standby Cache Core Bytes"
- standbyCacheNormalPriorityBytes = "Standby Cache Normal Priority Bytes"
- standbyCacheReserveBytes = "Standby Cache Reserve Bytes"
- systemCacheResidentBytes = "System Cache Resident Bytes"
- systemCodeResidentBytes = "System Code Resident Bytes"
- systemCodeTotalBytes = "System Code Total Bytes"
- systemDriverResidentBytes = "System Driver Resident Bytes"
- systemDriverTotalBytes = "System Driver Total Bytes"
- transitionFaultsPerSec = "Transition Faults/sec"
- transitionPagesRePurposedPerSec = "Transition Pages RePurposed/sec"
- writeCopiesPerSec = "Write Copies/sec"
-)
diff --git a/internal/collector/memory/memory.go b/internal/collector/memory/memory.go
index a3eab5c77..9f53deb5c 100644
--- a/internal/collector/memory/memory.go
+++ b/internal/collector/memory/memory.go
@@ -26,7 +26,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -42,7 +42,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
// Performance metrics
availableBytes *prometheus.Desc
@@ -109,46 +110,9 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
- counters := []string{
- availableBytes,
- availableKBytes,
- availableMBytes,
- cacheBytes,
- cacheBytesPeak,
- cacheFaultsPerSec,
- commitLimit,
- committedBytes,
- demandZeroFaultsPerSec,
- freeAndZeroPageListBytes,
- freeSystemPageTableEntries,
- modifiedPageListBytes,
- pageFaultsPerSec,
- pageReadsPerSec,
- pagesInputPerSec,
- pagesOutputPerSec,
- pagesPerSec,
- pageWritesPerSec,
- poolNonpagedAllocs,
- poolNonpagedBytes,
- poolPagedAllocs,
- poolPagedBytes,
- poolPagedResidentBytes,
- standbyCacheCoreBytes,
- standbyCacheNormalPriorityBytes,
- standbyCacheReserveBytes,
- systemCacheResidentBytes,
- systemCodeResidentBytes,
- systemCodeTotalBytes,
- systemDriverResidentBytes,
- systemDriverTotalBytes,
- transitionFaultsPerSec,
- transitionPagesRePurposedPerSec,
- writeCopiesPerSec,
- }
-
var err error
- c.perfDataCollector, err = perfdata.NewCollector("Memory", perfdata.InstancesAll, counters)
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("Memory", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Memory collector: %w", err)
}
@@ -423,207 +387,201 @@ func (c *Collector) collectGlobalMemoryStatus(ch chan<- prometheus.Metric) error
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Memory metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
-
- if !ok {
- return fmt.Errorf("failed to collect Memory metrics: %w", types.ErrNoData)
- }
-
ch <- prometheus.MustNewConstMetric(
c.availableBytes,
prometheus.GaugeValue,
- data[availableBytes].FirstValue,
+ c.perfDataObject[0].AvailableBytes,
)
ch <- prometheus.MustNewConstMetric(
c.cacheBytes,
prometheus.GaugeValue,
- data[cacheBytes].FirstValue,
+ c.perfDataObject[0].CacheBytes,
)
ch <- prometheus.MustNewConstMetric(
c.cacheBytesPeak,
prometheus.GaugeValue,
- data[cacheBytesPeak].FirstValue,
+ c.perfDataObject[0].CacheBytesPeak,
)
ch <- prometheus.MustNewConstMetric(
c.cacheFaultsTotal,
prometheus.CounterValue,
- data[cacheFaultsPerSec].FirstValue,
+ c.perfDataObject[0].CacheFaultsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.commitLimit,
prometheus.GaugeValue,
- data[commitLimit].FirstValue,
+ c.perfDataObject[0].CommitLimit,
)
ch <- prometheus.MustNewConstMetric(
c.committedBytes,
prometheus.GaugeValue,
- data[committedBytes].FirstValue,
+ c.perfDataObject[0].CommittedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.demandZeroFaultsTotal,
prometheus.CounterValue,
- data[demandZeroFaultsPerSec].FirstValue,
+ c.perfDataObject[0].DemandZeroFaultsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.freeAndZeroPageListBytes,
prometheus.GaugeValue,
- data[freeAndZeroPageListBytes].FirstValue,
+ c.perfDataObject[0].FreeAndZeroPageListBytes,
)
ch <- prometheus.MustNewConstMetric(
c.freeSystemPageTableEntries,
prometheus.GaugeValue,
- data[freeSystemPageTableEntries].FirstValue,
+ c.perfDataObject[0].FreeSystemPageTableEntries,
)
ch <- prometheus.MustNewConstMetric(
c.modifiedPageListBytes,
prometheus.GaugeValue,
- data[modifiedPageListBytes].FirstValue,
+ c.perfDataObject[0].ModifiedPageListBytes,
)
ch <- prometheus.MustNewConstMetric(
c.pageFaultsTotal,
prometheus.CounterValue,
- data[pageFaultsPerSec].FirstValue,
+ c.perfDataObject[0].PageFaultsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageReadsTotal,
prometheus.CounterValue,
- data[pageReadsPerSec].FirstValue,
+ c.perfDataObject[0].PageReadsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPagesReadTotal,
prometheus.CounterValue,
- data[pagesInputPerSec].FirstValue,
+ c.perfDataObject[0].PagesInputPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPagesWrittenTotal,
prometheus.CounterValue,
- data[pagesOutputPerSec].FirstValue,
+ c.perfDataObject[0].PagesOutputPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageOperationsTotal,
prometheus.CounterValue,
- data[pagesPerSec].FirstValue,
+ c.perfDataObject[0].PagesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageWritesTotal,
prometheus.CounterValue,
- data[pageWritesPerSec].FirstValue,
+ c.perfDataObject[0].PageWritesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.poolNonPagedAllocationsTotal,
prometheus.GaugeValue,
- data[poolNonpagedAllocs].FirstValue,
+ c.perfDataObject[0].PoolNonpagedAllocs,
)
ch <- prometheus.MustNewConstMetric(
c.poolNonPagedBytes,
prometheus.GaugeValue,
- data[poolNonpagedBytes].FirstValue,
+ c.perfDataObject[0].PoolNonpagedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedAllocationsTotal,
prometheus.CounterValue,
- data[poolPagedAllocs].FirstValue,
+ c.perfDataObject[0].PoolPagedAllocs,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedBytes,
prometheus.GaugeValue,
- data[poolPagedBytes].FirstValue,
+ c.perfDataObject[0].PoolPagedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedResidentBytes,
prometheus.GaugeValue,
- data[poolPagedResidentBytes].FirstValue,
+ c.perfDataObject[0].PoolPagedResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheCoreBytes,
prometheus.GaugeValue,
- data[standbyCacheCoreBytes].FirstValue,
+ c.perfDataObject[0].StandbyCacheCoreBytes,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheNormalPriorityBytes,
prometheus.GaugeValue,
- data[standbyCacheNormalPriorityBytes].FirstValue,
+ c.perfDataObject[0].StandbyCacheNormalPriorityBytes,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheReserveBytes,
prometheus.GaugeValue,
- data[standbyCacheReserveBytes].FirstValue,
+ c.perfDataObject[0].StandbyCacheReserveBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemCacheResidentBytes,
prometheus.GaugeValue,
- data[systemCacheResidentBytes].FirstValue,
+ c.perfDataObject[0].SystemCacheResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemCodeResidentBytes,
prometheus.GaugeValue,
- data[systemCodeResidentBytes].FirstValue,
+ c.perfDataObject[0].SystemCodeResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemCodeTotalBytes,
prometheus.GaugeValue,
- data[systemCodeTotalBytes].FirstValue,
+ c.perfDataObject[0].SystemCodeTotalBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemDriverResidentBytes,
prometheus.GaugeValue,
- data[systemDriverResidentBytes].FirstValue,
+ c.perfDataObject[0].SystemDriverResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemDriverTotalBytes,
prometheus.GaugeValue,
- data[systemDriverTotalBytes].FirstValue,
+ c.perfDataObject[0].SystemDriverTotalBytes,
)
ch <- prometheus.MustNewConstMetric(
c.transitionFaultsTotal,
prometheus.CounterValue,
- data[transitionFaultsPerSec].FirstValue,
+ c.perfDataObject[0].TransitionFaultsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.transitionPagesRepurposedTotal,
prometheus.CounterValue,
- data[transitionPagesRePurposedPerSec].FirstValue,
+ c.perfDataObject[0].TransitionPagesRePurposedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.writeCopiesTotal,
prometheus.CounterValue,
- data[writeCopiesPerSec].FirstValue,
+ c.perfDataObject[0].WriteCopiesPerSec,
)
return nil
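One behavioural difference worth noting: the removed code returned types.ErrNoData when the "Memory" query produced no rows, while the new code indexes c.perfDataObject[0] directly. If the typed Collect call can ever leave the slice empty, a small guard along these lines (names hypothetical, not part of this change) would preserve the old behaviour:

package main

import (
	"errors"
	"fmt"
)

var errNoData = errors.New("no data from performance counters") // stand-in for types.ErrNoData

// firstRow returns the single row of an instance-less PDH object, or an error
// when the collector produced none, mirroring the guard that was removed here.
func firstRow[T any](rows []T, object string) (T, error) {
	if len(rows) == 0 {
		var zero T

		return zero, fmt.Errorf("failed to collect %s metrics: %w", object, errNoData)
	}

	return rows[0], nil
}

func main() {
	type memoryRow struct{ AvailableBytes float64 }

	if _, err := firstRow([]memoryRow{}, "Memory"); err != nil {
		fmt.Println(err) // failed to collect Memory metrics: no data from performance counters
	}
}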
diff --git a/internal/collector/memory/types.go b/internal/collector/memory/types.go
new file mode 100644
index 000000000..7de27e4a2
--- /dev/null
+++ b/internal/collector/memory/types.go
@@ -0,0 +1,53 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package memory
+
+type perfDataCounterValues struct {
+ AvailableBytes float64 `perfdata:"Available Bytes"`
+ AvailableKBytes float64 `perfdata:"Available KBytes"`
+ AvailableMBytes float64 `perfdata:"Available MBytes"`
+ CacheBytes float64 `perfdata:"Cache Bytes"`
+ CacheBytesPeak float64 `perfdata:"Cache Bytes Peak"`
+ CacheFaultsPerSec float64 `perfdata:"Cache Faults/sec"`
+ CommitLimit float64 `perfdata:"Commit Limit"`
+ CommittedBytes float64 `perfdata:"Committed Bytes"`
+ DemandZeroFaultsPerSec float64 `perfdata:"Demand Zero Faults/sec"`
+ FreeAndZeroPageListBytes float64 `perfdata:"Free & Zero Page List Bytes"`
+ FreeSystemPageTableEntries float64 `perfdata:"Free System Page Table Entries"`
+ ModifiedPageListBytes float64 `perfdata:"Modified Page List Bytes"`
+ PageFaultsPerSec float64 `perfdata:"Page Faults/sec"`
+ PageReadsPerSec float64 `perfdata:"Page Reads/sec"`
+ PagesInputPerSec float64 `perfdata:"Pages Input/sec"`
+ PagesOutputPerSec float64 `perfdata:"Pages Output/sec"`
+ PagesPerSec float64 `perfdata:"Pages/sec"`
+ PageWritesPerSec float64 `perfdata:"Page Writes/sec"`
+ PoolNonpagedAllocs float64 `perfdata:"Pool Nonpaged Allocs"`
+ PoolNonpagedBytes float64 `perfdata:"Pool Nonpaged Bytes"`
+ PoolPagedAllocs float64 `perfdata:"Pool Paged Allocs"`
+ PoolPagedBytes float64 `perfdata:"Pool Paged Bytes"`
+ PoolPagedResidentBytes float64 `perfdata:"Pool Paged Resident Bytes"`
+ StandbyCacheCoreBytes float64 `perfdata:"Standby Cache Core Bytes"`
+ StandbyCacheNormalPriorityBytes float64 `perfdata:"Standby Cache Normal Priority Bytes"`
+ StandbyCacheReserveBytes float64 `perfdata:"Standby Cache Reserve Bytes"`
+ SystemCacheResidentBytes float64 `perfdata:"System Cache Resident Bytes"`
+ SystemCodeResidentBytes float64 `perfdata:"System Code Resident Bytes"`
+ SystemCodeTotalBytes float64 `perfdata:"System Code Total Bytes"`
+ SystemDriverResidentBytes float64 `perfdata:"System Driver Resident Bytes"`
+ SystemDriverTotalBytes float64 `perfdata:"System Driver Total Bytes"`
+ TransitionFaultsPerSec float64 `perfdata:"Transition Faults/sec"`
+ TransitionPagesRePurposedPerSec float64 `perfdata:"Transition Pages RePurposed/sec"`
+ WriteCopiesPerSec float64 `perfdata:"Write Copies/sec"`
+}
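Unlike the LogicalDisk and MSMQ row types, this struct has no Name field: the "Memory" object is instance-less (the removed code read perfdata.InstanceEmpty), so collectPDH reads a single row at index 0. A purely hypothetical sketch of how a typed collector could tell the two shapes apart; the actual internal/pdh mechanism is not visible in this diff:

package main

import (
	"fmt"
	"reflect"
)

// hasInstanceName reports whether the row type carries a Name field, i.e.
// whether one row per PDH instance is expected rather than a single row.
func hasInstanceName[T any]() bool {
	_, ok := reflect.TypeOf(*new(T)).FieldByName("Name")

	return ok
}

func main() {
	type memoryRow struct {
		AvailableBytes float64 `perfdata:"Available Bytes"`
	}
	type diskRow struct {
		Name      string
		FreeSpace float64 `perfdata:"Free Megabytes"`
	}

	fmt.Println(hasInstanceName[memoryRow]()) // false: single row, read index 0
	fmt.Println(hasInstanceName[diskRow]())   // true: one row per volume instance
}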
diff --git a/internal/collector/msmq/msmq.go b/internal/collector/msmq/msmq.go
index 9d18c3e1d..3ff749ca8 100644
--- a/internal/collector/msmq/msmq.go
+++ b/internal/collector/msmq/msmq.go
@@ -21,7 +21,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -36,7 +36,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics.
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
bytesInJournalQueue *prometheus.Desc
bytesInQueue *prometheus.Desc
@@ -75,12 +76,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("MSMQ Queue", perfdata.InstancesAll, []string{
- bytesInJournalQueue,
- bytesInQueue,
- messagesInJournalQueue,
- messagesInQueue,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("MSMQ Queue", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
}
@@ -116,38 +112,38 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect MSMQ Queue metrics: %w", err)
}
- for name, data := range perfData {
+ for _, data := range c.perfDataObject {
ch <- prometheus.MustNewConstMetric(
c.bytesInJournalQueue,
prometheus.GaugeValue,
- data[bytesInJournalQueue].FirstValue,
- name,
+ data.BytesInJournalQueue,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.bytesInQueue,
prometheus.GaugeValue,
- data[bytesInQueue].FirstValue,
- name,
+ data.BytesInQueue,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesInJournalQueue,
prometheus.GaugeValue,
- data[messagesInJournalQueue].FirstValue,
- name,
+ data.MessagesInJournalQueue,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesInQueue,
prometheus.GaugeValue,
- data[messagesInQueue].FirstValue,
- name,
+ data.MessagesInQueue,
+ data.Name,
)
}
diff --git a/internal/collector/msmq/const.go b/internal/collector/msmq/types.go
similarity index 66%
rename from internal/collector/msmq/const.go
rename to internal/collector/msmq/types.go
index bce1b3eb2..3b2af0bf4 100644
--- a/internal/collector/msmq/const.go
+++ b/internal/collector/msmq/types.go
@@ -15,9 +15,11 @@
package msmq
-const (
- bytesInJournalQueue = "Bytes in Journal Queue"
- bytesInQueue = "Bytes in Queue"
- messagesInJournalQueue = "Messages in Journal Queue"
- messagesInQueue = "Messages in Queue"
-)
+type perfDataCounterValues struct {
+ Name string
+
+ BytesInJournalQueue float64 `perfdata:"Bytes in Journal Queue"`
+ BytesInQueue float64 `perfdata:"Bytes in Queue"`
+ MessagesInJournalQueue float64 `perfdata:"Messages in Journal Queue"`
+ MessagesInQueue float64 `perfdata:"Messages in Queue"`
+}
diff --git a/internal/collector/mssql/mssql.go b/internal/collector/mssql/mssql.go
index 6d851b356..1ff8013cf 100644
--- a/internal/collector/mssql/mssql.go
+++ b/internal/collector/mssql/mssql.go
@@ -16,6 +16,7 @@
package mssql
import (
+ "context"
"errors"
"fmt"
"log/slog"
@@ -26,7 +27,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
@@ -273,7 +274,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
if len(c.mssqlInstances) == 0 {
- return fmt.Errorf("no SQL instances found: %w", perfdata.ErrNoData)
+ return fmt.Errorf("no SQL instances found: %w", pdh.ErrNoData)
}
errCh := make(chan error, len(c.collectorFns))
@@ -368,26 +369,32 @@ func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string)
func (c *Collector) collect(
ch chan<- prometheus.Metric,
collector string,
- perfDataCollectors map[string]*perfdata.Collector,
- collectFn func(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error,
+ perfDataCollectors map[string]*pdh.Collector,
+ collectFn func(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error,
) error {
errs := make([]error, 0, len(perfDataCollectors))
+ ctx := context.Background()
+
for sqlInstance, perfDataCollector := range perfDataCollectors {
begin := time.Now()
success := 1.0
err := collectFn(ch, sqlInstance, perfDataCollector)
duration := time.Since(begin)
- if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+ if err != nil && !errors.Is(err, pdh.ErrNoData) {
errs = append(errs, err)
success = 0.0
- c.logger.Debug(fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance, duration),
+ c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance, duration),
slog.Any("err", err),
)
} else {
- c.logger.Debug(fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance, duration))
+ c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance, duration))
+ }
+
+ if collector == "" {
+ continue
}
ch <- prometheus.MustNewConstMetric(
diff --git a/internal/collector/mssql/mssql_access_methods.go b/internal/collector/mssql/mssql_access_methods.go
index b37a56b6b..52fe3d338 100644
--- a/internal/collector/mssql/mssql_access_methods.go
+++ b/internal/collector/mssql/mssql_access_methods.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorAccessMethods struct {
- accessMethodsPerfDataCollectors map[string]*perfdata.Collector
+ accessMethodsPerfDataCollectors map[string]*pdh.Collector
+ accessMethodsPerfDataObject []perfDataCounterValuesAccessMethods
accessMethodsAUcleanupbatches *prometheus.Desc
accessMethodsAUcleanups *prometheus.Desc
@@ -73,107 +74,63 @@ type collectorAccessMethods struct {
accessMethodsWorktablesFromCacheLookups *prometheus.Desc
}
-const (
- accessMethodsAUCleanupbatchesPerSec = "AU cleanup batches/sec"
- accessMethodsAUCleanupsPerSec = "AU cleanups/sec"
- accessMethodsByReferenceLobCreateCount = "By-reference Lob Create Count"
- accessMethodsByReferenceLobUseCount = "By-reference Lob Use Count"
- accessMethodsCountLobReadahead = "Count Lob Readahead"
- accessMethodsCountPullInRow = "Count Pull In Row"
- accessMethodsCountPushOffRow = "Count Push Off Row"
- accessMethodsDeferredDroppedAUs = "Deferred dropped AUs"
- accessMethodsDeferredDroppedRowsets = "Deferred Dropped rowsets"
- accessMethodsDroppedRowsetCleanupsPerSec = "Dropped rowset cleanups/sec"
- accessMethodsDroppedRowsetsSkippedPerSec = "Dropped rowsets skipped/sec"
- accessMethodsExtentDeallocationsPerSec = "Extent Deallocations/sec"
- accessMethodsExtentsAllocatedPerSec = "Extents Allocated/sec"
- accessMethodsFailedAUCleanupBatchesPerSec = "Failed AU cleanup batches/sec"
- accessMethodsFailedLeafPageCookie = "Failed leaf page cookie"
- accessMethodsFailedTreePageCookie = "Failed tree page cookie"
- accessMethodsForwardedRecordsPerSec = "Forwarded Records/sec"
- accessMethodsFreeSpacePageFetchesPerSec = "FreeSpace Page Fetches/sec"
- accessMethodsFreeSpaceScansPerSec = "FreeSpace Scans/sec"
- accessMethodsFullScansPerSec = "Full Scans/sec"
- accessMethodsIndexSearchesPerSec = "Index Searches/sec"
- accessMethodsInSysXactWaitsPerSec = "InSysXact waits/sec"
- accessMethodsLobHandleCreateCount = "LobHandle Create Count"
- accessMethodsLobHandleDestroyCount = "LobHandle Destroy Count"
- accessMethodsLobSSProviderCreateCount = "LobSS Provider Create Count"
- accessMethodsLobSSProviderDestroyCount = "LobSS Provider Destroy Count"
- accessMethodsLobSSProviderTruncationCount = "LobSS Provider Truncation Count"
- accessMethodsMixedPageAllocationsPerSec = "Mixed page allocations/sec"
- accessMethodsPageCompressionAttemptsPerSec = "Page compression attempts/sec"
- accessMethodsPageDeallocationsPerSec = "Page Deallocations/sec"
- accessMethodsPagesAllocatedPerSec = "Pages Allocated/sec"
- accessMethodsPagesCompressedPerSec = "Pages compressed/sec"
- accessMethodsPageSplitsPerSec = "Page Splits/sec"
- accessMethodsProbeScansPerSec = "Probe Scans/sec"
- accessMethodsRangeScansPerSec = "Range Scans/sec"
- accessMethodsScanPointRevalidationsPerSec = "Scan Point Revalidations/sec"
- accessMethodsSkippedGhostedRecordsPerSec = "Skipped Ghosted Records/sec"
- accessMethodsTableLockEscalationsPerSec = "Table Lock Escalations/sec"
- accessMethodsUsedLeafPageCookie = "Used leaf page cookie"
- accessMethodsUsedTreePageCookie = "Used tree page cookie"
- accessMethodsWorkfilesCreatedPerSec = "Workfiles Created/sec"
- accessMethodsWorktablesCreatedPerSec = "Worktables Created/sec"
- accessMethodsWorktablesFromCacheRatio = "Worktables From Cache Ratio"
- accessMethodsWorktablesFromCacheRatioBase = "Worktables From Cache Base"
-)
+type perfDataCounterValuesAccessMethods struct {
+ AccessMethodsAUCleanupbatchesPerSec float64 `perfdata:"AU cleanup batches/sec"`
+ AccessMethodsAUCleanupsPerSec float64 `perfdata:"AU cleanups/sec"`
+ AccessMethodsByReferenceLobCreateCount float64 `perfdata:"By-reference Lob Create Count"`
+ AccessMethodsByReferenceLobUseCount float64 `perfdata:"By-reference Lob Use Count"`
+ AccessMethodsCountLobReadahead float64 `perfdata:"Count Lob Readahead"`
+ AccessMethodsCountPullInRow float64 `perfdata:"Count Pull In Row"`
+ AccessMethodsCountPushOffRow float64 `perfdata:"Count Push Off Row"`
+ AccessMethodsDeferredDroppedAUs float64 `perfdata:"Deferred dropped AUs"`
+ AccessMethodsDeferredDroppedRowsets float64 `perfdata:"Deferred Dropped rowsets"`
+ AccessMethodsDroppedRowsetCleanupsPerSec float64 `perfdata:"Dropped rowset cleanups/sec"`
+ AccessMethodsDroppedRowsetsSkippedPerSec float64 `perfdata:"Dropped rowsets skipped/sec"`
+ AccessMethodsExtentDeallocationsPerSec float64 `perfdata:"Extent Deallocations/sec"`
+ AccessMethodsExtentsAllocatedPerSec float64 `perfdata:"Extents Allocated/sec"`
+ AccessMethodsFailedAUCleanupBatchesPerSec float64 `perfdata:"Failed AU cleanup batches/sec"`
+ AccessMethodsFailedLeafPageCookie float64 `perfdata:"Failed leaf page cookie"`
+ AccessMethodsFailedTreePageCookie float64 `perfdata:"Failed tree page cookie"`
+ AccessMethodsForwardedRecordsPerSec float64 `perfdata:"Forwarded Records/sec"`
+ AccessMethodsFreeSpacePageFetchesPerSec float64 `perfdata:"FreeSpace Page Fetches/sec"`
+ AccessMethodsFreeSpaceScansPerSec float64 `perfdata:"FreeSpace Scans/sec"`
+ AccessMethodsFullScansPerSec float64 `perfdata:"Full Scans/sec"`
+ AccessMethodsIndexSearchesPerSec float64 `perfdata:"Index Searches/sec"`
+ AccessMethodsInSysXactWaitsPerSec float64 `perfdata:"InSysXact waits/sec"`
+ AccessMethodsLobHandleCreateCount float64 `perfdata:"LobHandle Create Count"`
+ AccessMethodsLobHandleDestroyCount float64 `perfdata:"LobHandle Destroy Count"`
+ AccessMethodsLobSSProviderCreateCount float64 `perfdata:"LobSS Provider Create Count"`
+ AccessMethodsLobSSProviderDestroyCount float64 `perfdata:"LobSS Provider Destroy Count"`
+ AccessMethodsLobSSProviderTruncationCount float64 `perfdata:"LobSS Provider Truncation Count"`
+ AccessMethodsMixedPageAllocationsPerSec float64 `perfdata:"Mixed page allocations/sec"`
+ AccessMethodsPageCompressionAttemptsPerSec float64 `perfdata:"Page compression attempts/sec"`
+ AccessMethodsPageDeallocationsPerSec float64 `perfdata:"Page Deallocations/sec"`
+ AccessMethodsPagesAllocatedPerSec float64 `perfdata:"Pages Allocated/sec"`
+ AccessMethodsPagesCompressedPerSec float64 `perfdata:"Pages compressed/sec"`
+ AccessMethodsPageSplitsPerSec float64 `perfdata:"Page Splits/sec"`
+ AccessMethodsProbeScansPerSec float64 `perfdata:"Probe Scans/sec"`
+ AccessMethodsRangeScansPerSec float64 `perfdata:"Range Scans/sec"`
+ AccessMethodsScanPointRevalidationsPerSec float64 `perfdata:"Scan Point Revalidations/sec"`
+ AccessMethodsSkippedGhostedRecordsPerSec float64 `perfdata:"Skipped Ghosted Records/sec"`
+ AccessMethodsTableLockEscalationsPerSec float64 `perfdata:"Table Lock Escalations/sec"`
+ AccessMethodsUsedLeafPageCookie float64 `perfdata:"Used leaf page cookie"`
+ AccessMethodsUsedTreePageCookie float64 `perfdata:"Used tree page cookie"`
+ AccessMethodsWorkfilesCreatedPerSec float64 `perfdata:"Workfiles Created/sec"`
+ AccessMethodsWorktablesCreatedPerSec float64 `perfdata:"Worktables Created/sec"`
+ AccessMethodsWorktablesFromCacheRatio float64 `perfdata:"Worktables From Cache Ratio"`
+ AccessMethodsWorktablesFromCacheRatioBase float64 `perfdata:"Worktables From Cache Base,secondvalue"`
+}
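The only ",secondvalue" field here is the ratio base. If these counters follow the usual raw-fraction layout (raw value counting worktables served from cache, base value counting worktable lookups; that is an assumption, not something shown in the lines above), the interesting quantity is a ratio derived at query time rather than either raw number alone:

package main

import "fmt"

// cacheRatio derives the worktables-from-cache fraction from the two exported
// raw values; illustrative only, with a guard against an empty base.
func cacheRatio(hits, lookups float64) float64 {
	if lookups == 0 {
		return 0
	}

	return hits / lookups
}

func main() {
	// Illustrative sample values for one SQL instance.
	fmt.Printf("worktables served from cache: %.1f%%\n", 100*cacheRatio(930, 1000))
	// worktables served from cache: 93.0%
}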
func (c *Collector) buildAccessMethods() error {
var err error
- c.accessMethodsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.accessMethodsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- accessMethodsAUCleanupbatchesPerSec,
- accessMethodsAUCleanupsPerSec,
- accessMethodsByReferenceLobCreateCount,
- accessMethodsByReferenceLobUseCount,
- accessMethodsCountLobReadahead,
- accessMethodsCountPullInRow,
- accessMethodsCountPushOffRow,
- accessMethodsDeferredDroppedAUs,
- accessMethodsDeferredDroppedRowsets,
- accessMethodsDroppedRowsetCleanupsPerSec,
- accessMethodsDroppedRowsetsSkippedPerSec,
- accessMethodsExtentDeallocationsPerSec,
- accessMethodsExtentsAllocatedPerSec,
- accessMethodsFailedAUCleanupBatchesPerSec,
- accessMethodsFailedLeafPageCookie,
- accessMethodsFailedTreePageCookie,
- accessMethodsForwardedRecordsPerSec,
- accessMethodsFreeSpacePageFetchesPerSec,
- accessMethodsFreeSpaceScansPerSec,
- accessMethodsFullScansPerSec,
- accessMethodsIndexSearchesPerSec,
- accessMethodsInSysXactWaitsPerSec,
- accessMethodsLobHandleCreateCount,
- accessMethodsLobHandleDestroyCount,
- accessMethodsLobSSProviderCreateCount,
- accessMethodsLobSSProviderDestroyCount,
- accessMethodsLobSSProviderTruncationCount,
- accessMethodsMixedPageAllocationsPerSec,
- accessMethodsPageCompressionAttemptsPerSec,
- accessMethodsPageDeallocationsPerSec,
- accessMethodsPagesAllocatedPerSec,
- accessMethodsPagesCompressedPerSec,
- accessMethodsPageSplitsPerSec,
- accessMethodsProbeScansPerSec,
- accessMethodsRangeScansPerSec,
- accessMethodsScanPointRevalidationsPerSec,
- accessMethodsSkippedGhostedRecordsPerSec,
- accessMethodsTableLockEscalationsPerSec,
- accessMethodsUsedLeafPageCookie,
- accessMethodsUsedTreePageCookie,
- accessMethodsWorkfilesCreatedPerSec,
- accessMethodsWorktablesCreatedPerSec,
- accessMethodsWorktablesFromCacheRatio,
- accessMethodsWorktablesFromCacheRatioBase,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.accessMethodsPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Access Methods"), nil, counters)
+ c.accessMethodsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "Access Methods"), nil,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create AccessMethods collector for instance %s: %w", sqlInstance.name, err))
}
@@ -452,326 +409,317 @@ func (c *Collector) collectAccessMethods(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorAccessMethods, c.accessMethodsPerfDataCollectors, c.collectAccessMethodsInstance)
}
-func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.accessMethodsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"), err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"))
- }
-
ch <- prometheus.MustNewConstMetric(
c.accessMethodsAUcleanupbatches,
prometheus.CounterValue,
- data[accessMethodsAUCleanupbatchesPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupbatchesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsAUcleanups,
prometheus.CounterValue,
- data[accessMethodsAUCleanupsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsByReferenceLobCreateCount,
prometheus.CounterValue,
- data[accessMethodsByReferenceLobCreateCount].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobCreateCount,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsByReferenceLobUseCount,
prometheus.CounterValue,
- data[accessMethodsByReferenceLobUseCount].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobUseCount,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountLobReadahead,
prometheus.CounterValue,
- data[accessMethodsCountLobReadahead].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsCountLobReadahead,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountPullInRow,
prometheus.CounterValue,
- data[accessMethodsCountPullInRow].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsCountPullInRow,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountPushOffRow,
prometheus.CounterValue,
- data[accessMethodsCountPushOffRow].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsCountPushOffRow,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDeferreddroppedAUs,
prometheus.GaugeValue,
- data[accessMethodsDeferredDroppedAUs].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedAUs,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDeferredDroppedrowsets,
prometheus.GaugeValue,
- data[accessMethodsDeferredDroppedRowsets].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedRowsets,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDroppedrowsetcleanups,
prometheus.CounterValue,
- data[accessMethodsDroppedRowsetCleanupsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetCleanupsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDroppedrowsetsskipped,
prometheus.CounterValue,
- data[accessMethodsDroppedRowsetsSkippedPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetsSkippedPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsExtentDeallocations,
prometheus.CounterValue,
- data[accessMethodsExtentDeallocationsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsExtentDeallocationsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsExtentsAllocated,
prometheus.CounterValue,
- data[accessMethodsExtentsAllocatedPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsExtentsAllocatedPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedAUcleanupbatches,
prometheus.CounterValue,
- data[accessMethodsFailedAUCleanupBatchesPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsFailedAUCleanupBatchesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedleafpagecookie,
prometheus.CounterValue,
- data[accessMethodsFailedLeafPageCookie].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsFailedLeafPageCookie,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedtreepagecookie,
prometheus.CounterValue,
- data[accessMethodsFailedTreePageCookie].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsFailedTreePageCookie,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsForwardedRecords,
prometheus.CounterValue,
- data[accessMethodsForwardedRecordsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsForwardedRecordsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFreeSpacePageFetches,
prometheus.CounterValue,
- data[accessMethodsFreeSpacePageFetchesPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpacePageFetchesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFreeSpaceScans,
prometheus.CounterValue,
- data[accessMethodsFreeSpaceScansPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpaceScansPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFullScans,
prometheus.CounterValue,
- data[accessMethodsFullScansPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsFullScansPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsIndexSearches,
prometheus.CounterValue,
- data[accessMethodsIndexSearchesPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsIndexSearchesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsInSysXactwaits,
prometheus.CounterValue,
- data[accessMethodsInSysXactWaitsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsInSysXactWaitsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobHandleCreateCount,
prometheus.CounterValue,
- data[accessMethodsLobHandleCreateCount].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleCreateCount,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobHandleDestroyCount,
prometheus.CounterValue,
- data[accessMethodsLobHandleDestroyCount].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleDestroyCount,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderCreateCount,
prometheus.CounterValue,
- data[accessMethodsLobSSProviderCreateCount].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderCreateCount,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderDestroyCount,
prometheus.CounterValue,
- data[accessMethodsLobSSProviderDestroyCount].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderDestroyCount,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderTruncationCount,
prometheus.CounterValue,
- data[accessMethodsLobSSProviderTruncationCount].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderTruncationCount,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsMixedPageAllocations,
prometheus.CounterValue,
- data[accessMethodsMixedPageAllocationsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsMixedPageAllocationsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageCompressionAttempts,
prometheus.CounterValue,
- data[accessMethodsPageCompressionAttemptsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsPageCompressionAttemptsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageDeallocations,
prometheus.CounterValue,
- data[accessMethodsPageDeallocationsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsPageDeallocationsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPagesAllocated,
prometheus.CounterValue,
- data[accessMethodsPagesAllocatedPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsPagesAllocatedPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPagesCompressed,
prometheus.CounterValue,
- data[accessMethodsPagesCompressedPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsPagesCompressedPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageSplits,
prometheus.CounterValue,
- data[accessMethodsPageSplitsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsPageSplitsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsProbeScans,
prometheus.CounterValue,
- data[accessMethodsProbeScansPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsProbeScansPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsRangeScans,
prometheus.CounterValue,
- data[accessMethodsRangeScansPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsRangeScansPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsScanPointRevalidations,
prometheus.CounterValue,
- data[accessMethodsScanPointRevalidationsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsScanPointRevalidationsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsSkippedGhostedRecords,
prometheus.CounterValue,
- data[accessMethodsSkippedGhostedRecordsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsSkippedGhostedRecordsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsTableLockEscalations,
prometheus.CounterValue,
- data[accessMethodsTableLockEscalationsPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsTableLockEscalationsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsUsedleafpagecookie,
prometheus.CounterValue,
- data[accessMethodsUsedLeafPageCookie].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsUsedLeafPageCookie,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsUsedtreepagecookie,
prometheus.CounterValue,
- data[accessMethodsUsedTreePageCookie].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsUsedTreePageCookie,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorkfilesCreated,
prometheus.CounterValue,
- data[accessMethodsWorkfilesCreatedPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsWorkfilesCreatedPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesCreated,
prometheus.CounterValue,
- data[accessMethodsWorktablesCreatedPerSec].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesCreatedPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesFromCacheHits,
prometheus.CounterValue,
- data[accessMethodsWorktablesFromCacheRatio].FirstValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatio,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesFromCacheLookups,
prometheus.CounterValue,
- data[accessMethodsWorktablesFromCacheRatioBase].SecondValue,
+ c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatioBase,
sqlInstance,
)
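
The access-methods hunk above is representative of the whole migration: per-counter map lookups such as data[counter].FirstValue are replaced by a generic collector that scans counter values into perfdata-tagged struct fields. A minimal sketch of that pattern, assuming a hypothetical single-instance performance object and illustrative names (this is not the exporter's actual code):

// Sketch only: hypothetical object, counter, and descriptor names.
package example

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/internal/pdh"
	"github.com/prometheus/client_golang/prometheus"
)

// exampleCounterValues maps counters of an assumed single-instance object;
// the perfdata tag carries the counter name exactly as Windows exposes it.
type exampleCounterValues struct {
	BatchRequestsPerSec float64 `perfdata:"Batch Requests/sec"`
}

func collectExample(ch chan<- prometheus.Metric, desc *prometheus.Desc) error {
	// nil instance filter: the object is assumed to have one unnamed instance,
	// so Collect fills exactly one slice element.
	collector, err := pdh.NewCollector[exampleCounterValues]("SQLServer:Example", nil)
	if err != nil {
		return err
	}

	var values []exampleCounterValues
	if err := collector.Collect(&values); err != nil {
		return fmt.Errorf("failed to collect example metrics: %w", err)
	}

	// desc is assumed to have no variable labels, mirroring the [0] access used above.
	ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, values[0].BatchRequestsPerSec)

	return nil
}
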
diff --git a/internal/collector/mssql/mssql_availability_replica.go b/internal/collector/mssql/mssql_availability_replica.go
index 0817e11b4..6214f141b 100644
--- a/internal/collector/mssql/mssql_availability_replica.go
+++ b/internal/collector/mssql/mssql_availability_replica.go
@@ -19,14 +19,15 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
)
type collectorAvailabilityReplica struct {
- availabilityReplicaPerfDataCollectors map[string]*perfdata.Collector
+ availabilityReplicaPerfDataCollectors map[string]*pdh.Collector
+ availabilityReplicaPerfDataObject []perfDataCounterValuesAvailabilityReplica
availReplicaBytesReceivedFromReplica *prometheus.Desc
availReplicaBytesSentToReplica *prometheus.Desc
@@ -39,37 +40,30 @@ type collectorAvailabilityReplica struct {
availReplicaSendsToTransport *prometheus.Desc
}
-const (
- availReplicaBytesReceivedFromReplicaPerSec = "Bytes Received from Replica/sec"
- availReplicaBytesSentToReplicaPerSec = "Bytes Sent to Replica/sec"
- availReplicaBytesSentToTransportPerSec = "Bytes Sent to Transport/sec"
- availReplicaFlowControlPerSec = "Flow Control/sec"
- availReplicaFlowControlTimeMSPerSec = "Flow Control Time (ms/sec)"
- availReplicaReceivesFromReplicaPerSec = "Receives from Replica/sec"
- availReplicaResentMessagesPerSec = "Resent Messages/sec"
- availReplicaSendsToReplicaPerSec = "Sends to Replica/sec"
- availReplicaSendsToTransportPerSec = "Sends to Transport/sec"
-)
+type perfDataCounterValuesAvailabilityReplica struct {
+ Name string
+
+ AvailReplicaBytesReceivedFromReplicaPerSec float64 `perfdata:"Bytes Received from Replica/sec"`
+ AvailReplicaBytesSentToReplicaPerSec float64 `perfdata:"Bytes Sent to Replica/sec"`
+ AvailReplicaBytesSentToTransportPerSec float64 `perfdata:"Bytes Sent to Transport/sec"`
+ AvailReplicaFlowControlPerSec float64 `perfdata:"Flow Control/sec"`
+ AvailReplicaFlowControlTimeMSPerSec float64 `perfdata:"Flow Control Time (ms/sec)"`
+ AvailReplicaReceivesFromReplicaPerSec float64 `perfdata:"Receives from Replica/sec"`
+ AvailReplicaResentMessagesPerSec float64 `perfdata:"Resent Messages/sec"`
+ AvailReplicaSendsToReplicaPerSec float64 `perfdata:"Sends to Replica/sec"`
+ AvailReplicaSendsToTransportPerSec float64 `perfdata:"Sends to Transport/sec"`
+}
func (c *Collector) buildAvailabilityReplica() error {
var err error
- c.availabilityReplicaPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.availabilityReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- availReplicaBytesReceivedFromReplicaPerSec,
- availReplicaBytesSentToReplicaPerSec,
- availReplicaBytesSentToTransportPerSec,
- availReplicaFlowControlPerSec,
- availReplicaFlowControlTimeMSPerSec,
- availReplicaReceivesFromReplicaPerSec,
- availReplicaResentMessagesPerSec,
- availReplicaSendsToReplicaPerSec,
- availReplicaSendsToTransportPerSec,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.availabilityReplicaPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Availability Replica"), perfdata.InstancesAll, counters)
+ c.availabilityReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "Availability Replica"), pdh.InstancesAll,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance.name, err))
}
@@ -138,78 +132,74 @@ func (c *Collector) collectAvailabilityReplica(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorAvailabilityReplica, c.availabilityReplicaPerfDataCollectors, c.collectAvailabilityReplicaInstance)
}
-func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.availabilityReplicaPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), err)
}
- for replicaName, data := range perfData {
+ for _, data := range c.availabilityReplicaPerfDataObject {
ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesReceivedFromReplica,
prometheus.CounterValue,
- data[availReplicaBytesReceivedFromReplicaPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.AvailReplicaBytesReceivedFromReplicaPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesSentToReplica,
prometheus.CounterValue,
- data[availReplicaBytesSentToReplicaPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.AvailReplicaBytesSentToReplicaPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesSentToTransport,
prometheus.CounterValue,
- data[availReplicaBytesSentToTransportPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.AvailReplicaBytesSentToTransportPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaFlowControl,
prometheus.CounterValue,
- data[availReplicaFlowControlPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.AvailReplicaFlowControlPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaFlowControlTimeMS,
prometheus.CounterValue,
- utils.MilliSecToSec(data[availReplicaFlowControlTimeMSPerSec].FirstValue),
- sqlInstance, replicaName,
+ utils.MilliSecToSec(data.AvailReplicaFlowControlTimeMSPerSec),
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaReceivesFromReplica,
prometheus.CounterValue,
- data[availReplicaReceivesFromReplicaPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.AvailReplicaReceivesFromReplicaPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaResentMessages,
prometheus.CounterValue,
- data[availReplicaResentMessagesPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.AvailReplicaResentMessagesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaSendsToReplica,
prometheus.CounterValue,
- data[availReplicaSendsToReplicaPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.AvailReplicaSendsToReplicaPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaSendsToTransport,
prometheus.CounterValue,
- data[availReplicaSendsToTransportPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.AvailReplicaSendsToTransportPerSec,
+ sqlInstance, data.Name,
)
}
diff --git a/internal/collector/mssql/mssql_buffer_manager.go b/internal/collector/mssql/mssql_buffer_manager.go
index 12e297c0f..61de0884c 100644
--- a/internal/collector/mssql/mssql_buffer_manager.go
+++ b/internal/collector/mssql/mssql_buffer_manager.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorBufferManager struct {
- bufManPerfDataCollectors map[string]*perfdata.Collector
+ bufManPerfDataCollectors map[string]*pdh.Collector
+ bufManPerfDataObject []perfDataCounterValuesBufMan
bufManBackgroundwriterpages *prometheus.Desc
bufManBuffercachehits *prometheus.Desc
@@ -52,65 +53,42 @@ type collectorBufferManager struct {
bufManTargetpages *prometheus.Desc
}
-const (
- bufManBackgroundWriterPagesPerSec = "Background writer pages/sec"
- bufManBufferCacheHitRatio = "Buffer cache hit ratio"
- bufManBufferCacheHitRatioBase = "Buffer cache hit ratio base"
- bufManCheckpointPagesPerSec = "Checkpoint pages/sec"
- bufManDatabasePages = "Database pages"
- bufManExtensionAllocatedPages = "Extension allocated pages"
- bufManExtensionFreePages = "Extension free pages"
- bufManExtensionInUseAsPercentage = "Extension in use as percentage"
- bufManExtensionOutstandingIOCounter = "Extension outstanding IO counter"
- bufManExtensionPageEvictionsPerSec = "Extension page evictions/sec"
- bufManExtensionPageReadsPerSec = "Extension page reads/sec"
- bufManExtensionPageUnreferencedTime = "Extension page unreferenced time"
- bufManExtensionPageWritesPerSec = "Extension page writes/sec"
- bufManFreeListStallsPerSec = "Free list stalls/sec"
- bufManIntegralControllerSlope = "Integral Controller Slope"
- bufManLazyWritesPerSec = "Lazy writes/sec"
- bufManPageLifeExpectancy = "Page life expectancy"
- bufManPageLookupsPerSec = "Page lookups/sec"
- bufManPageReadsPerSec = "Page reads/sec"
- bufManPageWritesPerSec = "Page writes/sec"
- bufManReadaheadPagesPerSec = "Readahead pages/sec"
- bufManReadaheadTimePerSec = "Readahead time/sec"
- bufManTargetPages = "Target pages"
-)
+type perfDataCounterValuesBufMan struct {
+ BufManBackgroundWriterPagesPerSec float64 `perfdata:"Background writer pages/sec"`
+ BufManBufferCacheHitRatio float64 `perfdata:"Buffer cache hit ratio"`
+ BufManBufferCacheHitRatioBase float64 `perfdata:"Buffer cache hit ratio base,secondvalue"`
+ BufManCheckpointPagesPerSec float64 `perfdata:"Checkpoint pages/sec"`
+ BufManDatabasePages float64 `perfdata:"Database pages"`
+ BufManExtensionAllocatedPages float64 `perfdata:"Extension allocated pages"`
+ BufManExtensionFreePages float64 `perfdata:"Extension free pages"`
+ BufManExtensionInUseAsPercentage float64 `perfdata:"Extension in use as percentage"`
+ BufManExtensionOutstandingIOCounter float64 `perfdata:"Extension outstanding IO counter"`
+ BufManExtensionPageEvictionsPerSec float64 `perfdata:"Extension page evictions/sec"`
+ BufManExtensionPageReadsPerSec float64 `perfdata:"Extension page reads/sec"`
+ BufManExtensionPageUnreferencedTime float64 `perfdata:"Extension page unreferenced time"`
+ BufManExtensionPageWritesPerSec float64 `perfdata:"Extension page writes/sec"`
+ BufManFreeListStallsPerSec float64 `perfdata:"Free list stalls/sec"`
+ BufManIntegralControllerSlope float64 `perfdata:"Integral Controller Slope"`
+ BufManLazyWritesPerSec float64 `perfdata:"Lazy writes/sec"`
+ BufManPageLifeExpectancy float64 `perfdata:"Page life expectancy"`
+ BufManPageLookupsPerSec float64 `perfdata:"Page lookups/sec"`
+ BufManPageReadsPerSec float64 `perfdata:"Page reads/sec"`
+ BufManPageWritesPerSec float64 `perfdata:"Page writes/sec"`
+ BufManReadaheadPagesPerSec float64 `perfdata:"Readahead pages/sec"`
+ BufManReadaheadTimePerSec float64 `perfdata:"Readahead time/sec"`
+ BufManTargetPages float64 `perfdata:"Target pages"`
+}
func (c *Collector) buildBufferManager() error {
var err error
- c.bufManPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.bufManPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- bufManBackgroundWriterPagesPerSec,
- bufManBufferCacheHitRatio,
- bufManBufferCacheHitRatioBase,
- bufManCheckpointPagesPerSec,
- bufManDatabasePages,
- bufManExtensionAllocatedPages,
- bufManExtensionFreePages,
- bufManExtensionInUseAsPercentage,
- bufManExtensionOutstandingIOCounter,
- bufManExtensionPageEvictionsPerSec,
- bufManExtensionPageReadsPerSec,
- bufManExtensionPageUnreferencedTime,
- bufManExtensionPageWritesPerSec,
- bufManFreeListStallsPerSec,
- bufManIntegralControllerSlope,
- bufManLazyWritesPerSec,
- bufManPageLifeExpectancy,
- bufManPageLookupsPerSec,
- bufManPageReadsPerSec,
- bufManPageWritesPerSec,
- bufManReadaheadPagesPerSec,
- bufManReadaheadTimePerSec,
- bufManTargetPages,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.bufManPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Buffer Manager"), nil, counters)
+ c.bufManPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesBufMan](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "Buffer Manager"), nil,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance.name, err))
}
@@ -262,175 +240,171 @@ func (c *Collector) collectBufferManager(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorBufferManager, c.bufManPerfDataCollectors, c.collectBufferManagerInstance)
}
-func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.bufManPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), err)
}
- for _, data := range perfData {
+ for _, data := range c.bufManPerfDataObject {
ch <- prometheus.MustNewConstMetric(
c.bufManBackgroundwriterpages,
prometheus.CounterValue,
- data[bufManBackgroundWriterPagesPerSec].FirstValue,
+ data.BufManBackgroundWriterPagesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManBuffercachehits,
prometheus.GaugeValue,
- data[bufManBufferCacheHitRatio].FirstValue,
+ data.BufManBufferCacheHitRatio,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManBuffercachelookups,
prometheus.GaugeValue,
- data[bufManBufferCacheHitRatioBase].SecondValue,
+ data.BufManBufferCacheHitRatioBase,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManCheckpointpages,
prometheus.CounterValue,
- data[bufManCheckpointPagesPerSec].FirstValue,
+ data.BufManCheckpointPagesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManDatabasepages,
prometheus.GaugeValue,
- data[bufManDatabasePages].FirstValue,
+ data.BufManDatabasePages,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionallocatedpages,
prometheus.GaugeValue,
- data[bufManExtensionAllocatedPages].FirstValue,
+ data.BufManExtensionAllocatedPages,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionfreepages,
prometheus.GaugeValue,
- data[bufManExtensionFreePages].FirstValue,
+ data.BufManExtensionFreePages,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensioninuseaspercentage,
prometheus.GaugeValue,
- data[bufManExtensionInUseAsPercentage].FirstValue,
+ data.BufManExtensionInUseAsPercentage,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionoutstandingIOcounter,
prometheus.GaugeValue,
- data[bufManExtensionOutstandingIOCounter].FirstValue,
+ data.BufManExtensionOutstandingIOCounter,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpageevictions,
prometheus.CounterValue,
- data[bufManExtensionPageEvictionsPerSec].FirstValue,
+ data.BufManExtensionPageEvictionsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpagereads,
prometheus.CounterValue,
- data[bufManExtensionPageReadsPerSec].FirstValue,
+ data.BufManExtensionPageReadsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpageunreferencedtime,
prometheus.GaugeValue,
- data[bufManExtensionPageUnreferencedTime].FirstValue,
+ data.BufManExtensionPageUnreferencedTime,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpagewrites,
prometheus.CounterValue,
- data[bufManExtensionPageWritesPerSec].FirstValue,
+ data.BufManExtensionPageWritesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManFreeliststalls,
prometheus.CounterValue,
- data[bufManFreeListStallsPerSec].FirstValue,
+ data.BufManFreeListStallsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManIntegralControllerSlope,
prometheus.GaugeValue,
- data[bufManIntegralControllerSlope].FirstValue,
+ data.BufManIntegralControllerSlope,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManLazywrites,
prometheus.CounterValue,
- data[bufManLazyWritesPerSec].FirstValue,
+ data.BufManLazyWritesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagelifeexpectancy,
prometheus.GaugeValue,
- data[bufManPageLifeExpectancy].FirstValue,
+ data.BufManPageLifeExpectancy,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagelookups,
prometheus.CounterValue,
- data[bufManPageLookupsPerSec].FirstValue,
+ data.BufManPageLookupsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagereads,
prometheus.CounterValue,
- data[bufManPageReadsPerSec].FirstValue,
+ data.BufManPageReadsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagewrites,
prometheus.CounterValue,
- data[bufManPageWritesPerSec].FirstValue,
+ data.BufManPageWritesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManReadaheadpages,
prometheus.CounterValue,
- data[bufManReadaheadPagesPerSec].FirstValue,
+ data.BufManReadaheadPagesPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManReadaheadtime,
prometheus.CounterValue,
- data[bufManReadaheadTimePerSec].FirstValue,
+ data.BufManReadaheadTimePerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManTargetpages,
prometheus.GaugeValue,
- data[bufManTargetPages].FirstValue,
+ data.BufManTargetPages,
sqlInstance,
)
}
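
The Buffer Manager struct above also shows how base counters carry over: where the old API read data[counter].SecondValue for a ratio-base counter, the new struct tag appends a secondvalue option to the counter name. A small illustrative declaration, using hypothetical counter names:

// Sketch only: hypothetical counter names. The ",secondvalue" tag option asks the
// pdh collector to populate the field from the counter's base (denominator) value,
// which the previous API exposed as data[counter].SecondValue.
package example

type exampleRatioValues struct {
	CacheHitRatio     float64 `perfdata:"Cache Hit Ratio"`
	CacheHitRatioBase float64 `perfdata:"Cache Hit Ratio Base,secondvalue"`
}
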
diff --git a/internal/collector/mssql/mssql_database.go b/internal/collector/mssql/mssql_database.go
index bc6a337fb..dd8b51f8b 100644
--- a/internal/collector/mssql/mssql_database.go
+++ b/internal/collector/mssql/mssql_database.go
@@ -19,13 +19,16 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorDatabases struct {
- databasesPerfDataCollectors map[string]*perfdata.Collector
+ databasesPerfDataCollectors map[string]*pdh.Collector
+ databasesPerfDataCollectors2019 map[string]*pdh.Collector
+ databasesPerfDataObject []perfDataCounterValuesDatabases
+ databasesPerfDataObject2019 []perfDataCounterValuesDatabases2019
databasesActiveParallelRedoThreads *prometheus.Desc
databasesActiveTransactions *prometheus.Desc
@@ -77,121 +80,83 @@ type collectorDatabases struct {
databasesXTPMemoryUsedKB *prometheus.Desc
}
-const (
- databasesActiveParallelRedoThreads = "Active parallel redo threads"
- databasesActiveTransactions = "Active Transactions"
- databasesBackupPerRestoreThroughputPerSec = "Backup/Restore Throughput/sec"
- databasesBulkCopyRowsPerSec = "Bulk Copy Rows/sec"
- databasesBulkCopyThroughputPerSec = "Bulk Copy Throughput/sec"
- databasesCommitTableEntries = "Commit table entries"
- databasesDataFilesSizeKB = "Data File(s) Size (KB)"
- databasesDBCCLogicalScanBytesPerSec = "DBCC Logical Scan Bytes/sec"
- databasesGroupCommitTimePerSec = "Group Commit Time/sec"
- databasesLogBytesFlushedPerSec = "Log Bytes Flushed/sec"
- databasesLogCacheHitRatio = "Log Cache Hit Ratio"
- databasesLogCacheHitRatioBase = "Log Cache Hit Ratio Base"
- databasesLogCacheReadsPerSec = "Log Cache Reads/sec"
- databasesLogFilesSizeKB = "Log File(s) Size (KB)"
- databasesLogFilesUsedSizeKB = "Log File(s) Used Size (KB)"
- databasesLogFlushesPerSec = "Log Flushes/sec"
- databasesLogFlushWaitsPerSec = "Log Flush Waits/sec"
- databasesLogFlushWaitTime = "Log Flush Wait Time"
- databasesLogFlushWriteTimeMS = "Log Flush Write Time (ms)"
- databasesLogGrowths = "Log Growths"
- databasesLogPoolCacheMissesPerSec = "Log Pool Cache Misses/sec"
- databasesLogPoolDiskReadsPerSec = "Log Pool Disk Reads/sec"
- databasesLogPoolHashDeletesPerSec = "Log Pool Hash Deletes/sec"
- databasesLogPoolHashInsertsPerSec = "Log Pool Hash Inserts/sec"
- databasesLogPoolInvalidHashEntryPerSec = "Log Pool Invalid Hash Entry/sec"
- databasesLogPoolLogScanPushesPerSec = "Log Pool Log Scan Pushes/sec"
- databasesLogPoolLogWriterPushesPerSec = "Log Pool LogWriter Pushes/sec"
- databasesLogPoolPushEmptyFreePoolPerSec = "Log Pool Push Empty FreePool/sec"
- databasesLogPoolPushLowMemoryPerSec = "Log Pool Push Low Memory/sec"
- databasesLogPoolPushNoFreeBufferPerSec = "Log Pool Push No Free Buffer/sec"
- databasesLogPoolReqBehindTruncPerSec = "Log Pool Req. Behind Trunc/sec"
- databasesLogPoolRequestsOldVLFPerSec = "Log Pool Requests Old VLF/sec"
- databasesLogPoolRequestsPerSec = "Log Pool Requests/sec"
- databasesLogPoolTotalActiveLogSize = "Log Pool Total Active Log Size"
- databasesLogPoolTotalSharedPoolSize = "Log Pool Total Shared Pool Size"
- databasesLogShrinks = "Log Shrinks"
- databasesLogTruncations = "Log Truncations"
- databasesPercentLogUsed = "Percent Log Used"
- databasesReplPendingXacts = "Repl. Pending Xacts"
- databasesReplTransRate = "Repl. Trans. Rate"
- databasesShrinkDataMovementBytesPerSec = "Shrink Data Movement Bytes/sec"
- databasesTrackedTransactionsPerSec = "Tracked transactions/sec"
- databasesTransactionsPerSec = "Transactions/sec"
- databasesWriteTransactionsPerSec = "Write Transactions/sec"
- databasesXTPControllerDLCLatencyPerFetch = "XTP Controller DLC Latency/Fetch"
- databasesXTPControllerDLCPeakLatency = "XTP Controller DLC Peak Latency"
- databasesXTPControllerLogProcessedPerSec = "XTP Controller Log Processed/sec"
- databasesXTPMemoryUsedKB = "XTP Memory Used (KB)"
-)
+type perfDataCounterValuesDatabases struct {
+ Name string
+
+ DatabasesActiveTransactions float64 `perfdata:"Active Transactions"`
+ DatabasesBackupPerRestoreThroughputPerSec float64 `perfdata:"Backup/Restore Throughput/sec"`
+ DatabasesBulkCopyRowsPerSec float64 `perfdata:"Bulk Copy Rows/sec"`
+ DatabasesBulkCopyThroughputPerSec float64 `perfdata:"Bulk Copy Throughput/sec"`
+ DatabasesCommitTableEntries float64 `perfdata:"Commit table entries"`
+ DatabasesDataFilesSizeKB float64 `perfdata:"Data File(s) Size (KB)"`
+ DatabasesDBCCLogicalScanBytesPerSec float64 `perfdata:"DBCC Logical Scan Bytes/sec"`
+ DatabasesGroupCommitTimePerSec float64 `perfdata:"Group Commit Time/sec"`
+ DatabasesLogBytesFlushedPerSec float64 `perfdata:"Log Bytes Flushed/sec"`
+ DatabasesLogCacheHitRatio float64 `perfdata:"Log Cache Hit Ratio"`
+ DatabasesLogCacheHitRatioBase float64 `perfdata:"Log Cache Hit Ratio Base,secondvalue"`
+ DatabasesLogCacheReadsPerSec float64 `perfdata:"Log Cache Reads/sec"`
+ DatabasesLogFilesSizeKB float64 `perfdata:"Log File(s) Size (KB)"`
+ DatabasesLogFilesUsedSizeKB float64 `perfdata:"Log File(s) Used Size (KB)"`
+ DatabasesLogFlushesPerSec float64 `perfdata:"Log Flushes/sec"`
+ DatabasesLogFlushWaitsPerSec float64 `perfdata:"Log Flush Waits/sec"`
+ DatabasesLogFlushWaitTime float64 `perfdata:"Log Flush Wait Time"`
+ DatabasesLogFlushWriteTimeMS float64 `perfdata:"Log Flush Write Time (ms)"`
+ DatabasesLogGrowths float64 `perfdata:"Log Growths"`
+ DatabasesLogPoolCacheMissesPerSec float64 `perfdata:"Log Pool Cache Misses/sec"`
+ DatabasesLogPoolDiskReadsPerSec float64 `perfdata:"Log Pool Disk Reads/sec"`
+ DatabasesLogPoolHashDeletesPerSec float64 `perfdata:"Log Pool Hash Deletes/sec"`
+ DatabasesLogPoolHashInsertsPerSec float64 `perfdata:"Log Pool Hash Inserts/sec"`
+ DatabasesLogPoolInvalidHashEntryPerSec float64 `perfdata:"Log Pool Invalid Hash Entry/sec"`
+ DatabasesLogPoolLogScanPushesPerSec float64 `perfdata:"Log Pool Log Scan Pushes/sec"`
+ DatabasesLogPoolLogWriterPushesPerSec float64 `perfdata:"Log Pool LogWriter Pushes/sec"`
+ DatabasesLogPoolPushEmptyFreePoolPerSec float64 `perfdata:"Log Pool Push Empty FreePool/sec"`
+ DatabasesLogPoolPushLowMemoryPerSec float64 `perfdata:"Log Pool Push Low Memory/sec"`
+ DatabasesLogPoolPushNoFreeBufferPerSec float64 `perfdata:"Log Pool Push No Free Buffer/sec"`
+ DatabasesLogPoolReqBehindTruncPerSec float64 `perfdata:"Log Pool Req. Behind Trunc/sec"`
+ DatabasesLogPoolRequestsOldVLFPerSec float64 `perfdata:"Log Pool Requests Old VLF/sec"`
+ DatabasesLogPoolRequestsPerSec float64 `perfdata:"Log Pool Requests/sec"`
+ DatabasesLogPoolTotalActiveLogSize float64 `perfdata:"Log Pool Total Active Log Size"`
+ DatabasesLogPoolTotalSharedPoolSize float64 `perfdata:"Log Pool Total Shared Pool Size"`
+ DatabasesLogShrinks float64 `perfdata:"Log Shrinks"`
+ DatabasesLogTruncations float64 `perfdata:"Log Truncations"`
+ DatabasesPercentLogUsed float64 `perfdata:"Percent Log Used"`
+ DatabasesReplPendingXacts float64 `perfdata:"Repl. Pending Xacts"`
+ DatabasesReplTransRate float64 `perfdata:"Repl. Trans. Rate"`
+ DatabasesShrinkDataMovementBytesPerSec float64 `perfdata:"Shrink Data Movement Bytes/sec"`
+ DatabasesTrackedTransactionsPerSec float64 `perfdata:"Tracked transactions/sec"`
+ DatabasesTransactionsPerSec float64 `perfdata:"Transactions/sec"`
+ DatabasesWriteTransactionsPerSec float64 `perfdata:"Write Transactions/sec"`
+ DatabasesXTPControllerDLCLatencyPerFetch float64 `perfdata:"XTP Controller DLC Latency/Fetch"`
+ DatabasesXTPControllerDLCPeakLatency float64 `perfdata:"XTP Controller DLC Peak Latency"`
+ DatabasesXTPControllerLogProcessedPerSec float64 `perfdata:"XTP Controller Log Processed/sec"`
+ DatabasesXTPMemoryUsedKB float64 `perfdata:"XTP Memory Used (KB)"`
+}
+
+type perfDataCounterValuesDatabases2019 struct {
+ Name string
+
+ DatabasesActiveParallelRedoThreads float64 `perfdata:"Active parallel redo threads"`
+}
func (c *Collector) buildDatabases() error {
var err error
- c.databasesPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.databasesPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+ c.databasesPerfDataCollectors2019 = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- databasesActiveTransactions,
- databasesBackupPerRestoreThroughputPerSec,
- databasesBulkCopyRowsPerSec,
- databasesBulkCopyThroughputPerSec,
- databasesCommitTableEntries,
- databasesDataFilesSizeKB,
- databasesDBCCLogicalScanBytesPerSec,
- databasesGroupCommitTimePerSec,
- databasesLogBytesFlushedPerSec,
- databasesLogCacheHitRatio,
- databasesLogCacheHitRatioBase,
- databasesLogCacheReadsPerSec,
- databasesLogFilesSizeKB,
- databasesLogFilesUsedSizeKB,
- databasesLogFlushesPerSec,
- databasesLogFlushWaitsPerSec,
- databasesLogFlushWaitTime,
- databasesLogFlushWriteTimeMS,
- databasesLogGrowths,
- databasesLogPoolCacheMissesPerSec,
- databasesLogPoolDiskReadsPerSec,
- databasesLogPoolHashDeletesPerSec,
- databasesLogPoolHashInsertsPerSec,
- databasesLogPoolInvalidHashEntryPerSec,
- databasesLogPoolLogScanPushesPerSec,
- databasesLogPoolLogWriterPushesPerSec,
- databasesLogPoolPushEmptyFreePoolPerSec,
- databasesLogPoolPushLowMemoryPerSec,
- databasesLogPoolPushNoFreeBufferPerSec,
- databasesLogPoolReqBehindTruncPerSec,
- databasesLogPoolRequestsOldVLFPerSec,
- databasesLogPoolRequestsPerSec,
- databasesLogPoolTotalActiveLogSize,
- databasesLogPoolTotalSharedPoolSize,
- databasesLogShrinks,
- databasesLogTruncations,
- databasesPercentLogUsed,
- databasesReplPendingXacts,
- databasesReplTransRate,
- databasesShrinkDataMovementBytesPerSec,
- databasesTrackedTransactionsPerSec,
- databasesTransactionsPerSec,
- databasesWriteTransactionsPerSec,
- databasesXTPControllerDLCLatencyPerFetch,
- databasesXTPControllerDLCPeakLatency,
- databasesXTPControllerLogProcessedPerSec,
- databasesXTPMemoryUsedKB,
- }
for _, sqlInstance := range c.mssqlInstances {
- if sqlInstance.isVersionGreaterOrEqualThan(serverVersion2019) {
- counters = append(counters, databasesActiveParallelRedoThreads)
- }
-
- c.databasesPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), perfdata.InstancesAll, counters)
+ c.databasesPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases](c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance.name, err))
}
+
+ if sqlInstance.isVersionGreaterOrEqualThan(serverVersion2019) {
+ c.databasesPerfDataCollectors2019[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to create Databases 2019 collector for instance %s: %w", sqlInstance.name, err))
+ }
+ }
}
c.databasesActiveParallelRedoThreads = prometheus.NewDesc(
@@ -487,356 +452,364 @@ func (c *Collector) buildDatabases() error {
}
func (c *Collector) collectDatabases(ch chan<- prometheus.Metric) error {
- return c.collect(ch, subCollectorDatabases, c.databasesPerfDataCollectors, c.collectDatabasesInstance)
+ return errors.Join(
+ c.collect(ch, subCollectorDatabases, c.databasesPerfDataCollectors, c.collectDatabasesInstance),
+ c.collect(ch, "", c.databasesPerfDataCollectors2019, c.collectDatabasesInstance2019),
+ )
}
-func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.databasesPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
}
- for dbName, data := range perfData {
- if counter, ok := data[databasesActiveParallelRedoThreads]; ok {
- ch <- prometheus.MustNewConstMetric(
- c.databasesActiveParallelRedoThreads,
- prometheus.GaugeValue,
- counter.FirstValue,
- sqlInstance, dbName,
- )
- }
-
+ for _, data := range c.databasesPerfDataObject {
ch <- prometheus.MustNewConstMetric(
c.databasesActiveTransactions,
prometheus.GaugeValue,
- data[databasesActiveTransactions].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesActiveTransactions,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBackupPerRestoreThroughput,
prometheus.CounterValue,
- data[databasesBackupPerRestoreThroughputPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesBackupPerRestoreThroughputPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBulkCopyRows,
prometheus.CounterValue,
- data[databasesBulkCopyRowsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesBulkCopyRowsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBulkCopyThroughput,
prometheus.CounterValue,
- data[databasesBulkCopyThroughputPerSec].FirstValue*1024,
- sqlInstance, dbName,
+ data.DatabasesBulkCopyThroughputPerSec*1024,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesCommitTableEntries,
prometheus.GaugeValue,
- data[databasesCommitTableEntries].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesCommitTableEntries,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesDataFilesSizeKB,
prometheus.GaugeValue,
- data[databasesDataFilesSizeKB].FirstValue*1024,
- sqlInstance, dbName,
+ data.DatabasesDataFilesSizeKB*1024,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesDBCCLogicalScanBytes,
prometheus.CounterValue,
- data[databasesDBCCLogicalScanBytesPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesDBCCLogicalScanBytesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesGroupCommitTime,
prometheus.CounterValue,
- data[databasesGroupCommitTimePerSec].FirstValue/1000000.0,
- sqlInstance, dbName,
+ data.DatabasesGroupCommitTimePerSec/1000000.0,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogBytesFlushed,
prometheus.CounterValue,
- data[databasesLogBytesFlushedPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogBytesFlushedPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheHits,
prometheus.GaugeValue,
- data[databasesLogCacheHitRatio].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogCacheHitRatio,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheLookups,
prometheus.GaugeValue,
- data[databasesLogCacheHitRatioBase].SecondValue,
- sqlInstance, dbName,
+ data.DatabasesLogCacheHitRatioBase,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheReads,
prometheus.CounterValue,
- data[databasesLogCacheReadsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogCacheReadsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFilesSizeKB,
prometheus.GaugeValue,
- data[databasesLogFilesSizeKB].FirstValue*1024,
- sqlInstance, dbName,
+ data.DatabasesLogFilesSizeKB*1024,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFilesUsedSizeKB,
prometheus.GaugeValue,
- data[databasesLogFilesUsedSizeKB].FirstValue*1024,
- sqlInstance, dbName,
+ data.DatabasesLogFilesUsedSizeKB*1024,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushes,
prometheus.CounterValue,
- data[databasesLogFlushesPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogFlushesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWaits,
prometheus.CounterValue,
- data[databasesLogFlushWaitsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogFlushWaitsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWaitTime,
prometheus.GaugeValue,
- data[databasesLogFlushWaitTime].FirstValue/1000.0,
- sqlInstance, dbName,
+ data.DatabasesLogFlushWaitTime/1000.0,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWriteTimeMS,
prometheus.GaugeValue,
- data[databasesLogFlushWriteTimeMS].FirstValue/1000.0,
- sqlInstance, dbName,
+ data.DatabasesLogFlushWriteTimeMS/1000.0,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogGrowths,
prometheus.GaugeValue,
- data[databasesLogGrowths].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogGrowths,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolCacheMisses,
prometheus.CounterValue,
- data[databasesLogPoolCacheMissesPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolCacheMissesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolDiskReads,
prometheus.CounterValue,
- data[databasesLogPoolDiskReadsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolDiskReadsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolHashDeletes,
prometheus.CounterValue,
- data[databasesLogPoolHashDeletesPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolHashDeletesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolHashInserts,
prometheus.CounterValue,
- data[databasesLogPoolHashInsertsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolHashInsertsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolInvalidHashEntry,
prometheus.CounterValue,
- data[databasesLogPoolInvalidHashEntryPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolInvalidHashEntryPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolLogScanPushes,
prometheus.CounterValue,
- data[databasesLogPoolLogScanPushesPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolLogScanPushesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolLogWriterPushes,
prometheus.CounterValue,
- data[databasesLogPoolLogWriterPushesPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolLogWriterPushesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushEmptyFreePool,
prometheus.CounterValue,
- data[databasesLogPoolPushEmptyFreePoolPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolPushEmptyFreePoolPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushLowMemory,
prometheus.CounterValue,
- data[databasesLogPoolPushLowMemoryPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolPushLowMemoryPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushNoFreeBuffer,
prometheus.CounterValue,
- data[databasesLogPoolPushNoFreeBufferPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolPushNoFreeBufferPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolReqBehindTrunc,
prometheus.CounterValue,
- data[databasesLogPoolReqBehindTruncPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolReqBehindTruncPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolRequestsOldVLF,
prometheus.CounterValue,
- data[databasesLogPoolRequestsOldVLFPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolRequestsOldVLFPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolRequests,
prometheus.CounterValue,
- data[databasesLogPoolRequestsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolRequestsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolTotalActiveLogSize,
prometheus.GaugeValue,
- data[databasesLogPoolTotalActiveLogSize].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolTotalActiveLogSize,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolTotalSharedPoolSize,
prometheus.GaugeValue,
- data[databasesLogPoolTotalSharedPoolSize].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogPoolTotalSharedPoolSize,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogShrinks,
prometheus.GaugeValue,
- data[databasesLogShrinks].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogShrinks,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogTruncations,
prometheus.GaugeValue,
- data[databasesLogTruncations].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesLogTruncations,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesPercentLogUsed,
prometheus.GaugeValue,
- data[databasesPercentLogUsed].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesPercentLogUsed,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesReplPendingXacts,
prometheus.GaugeValue,
- data[databasesReplPendingXacts].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesReplPendingXacts,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesReplTransRate,
prometheus.CounterValue,
- data[databasesReplTransRate].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesReplTransRate,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesShrinkDataMovementBytes,
prometheus.CounterValue,
- data[databasesShrinkDataMovementBytesPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesShrinkDataMovementBytesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesTrackedTransactions,
prometheus.CounterValue,
- data[databasesTrackedTransactionsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesTrackedTransactionsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesTransactions,
prometheus.CounterValue,
- data[databasesTransactionsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesTransactionsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesWriteTransactions,
prometheus.CounterValue,
- data[databasesWriteTransactionsPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesWriteTransactionsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerDLCLatencyPerFetch,
prometheus.GaugeValue,
- data[databasesXTPControllerDLCLatencyPerFetch].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesXTPControllerDLCLatencyPerFetch,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerDLCPeakLatency,
prometheus.GaugeValue,
- data[databasesXTPControllerDLCPeakLatency].FirstValue*1000000.0,
- sqlInstance, dbName,
+ data.DatabasesXTPControllerDLCPeakLatency*1000000.0,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerLogProcessed,
prometheus.CounterValue,
- data[databasesXTPControllerLogProcessedPerSec].FirstValue,
- sqlInstance, dbName,
+ data.DatabasesXTPControllerLogProcessedPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPMemoryUsedKB,
prometheus.GaugeValue,
- data[databasesXTPMemoryUsedKB].FirstValue*1024,
- sqlInstance, dbName,
+ data.DatabasesXTPMemoryUsedKB*1024,
+ sqlInstance, data.Name,
+ )
+ }
+
+ return nil
+}
+
+func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.databasesPerfDataObject2019)
+ if err != nil {
+ return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
+ }
+
+ for _, data := range c.databasesPerfDataObject2019 {
+ ch <- prometheus.MustNewConstMetric(
+ c.databasesActiveParallelRedoThreads,
+ prometheus.GaugeValue,
+ data.DatabasesActiveParallelRedoThreads,
+ sqlInstance, data.Name,
)
}
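
The Databases change additionally splits version-dependent counters into their own collector rather than appending them to a shared counter list, so the 2019-only counter is queried only on instances that support it. A self-contained sketch of that shape, with hypothetical object, field, and descriptor names (the real code builds its collectors once in buildDatabases and reuses them across scrapes):

// Sketch only: hypothetical names; collectors are created inline here purely to
// keep the example self-contained.
package example

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/internal/pdh"
	"github.com/prometheus/client_golang/prometheus"
)

// Name receives the counter instance name (here: the database name) and is used
// as a metric label, mirroring the data.Name usage above.
type exampleDatabaseValues struct {
	Name string

	TransactionsPerSec float64 `perfdata:"Transactions/sec"`
}

type exampleDatabaseValues2019 struct {
	Name string

	ActiveParallelRedoThreads float64 `perfdata:"Active parallel redo threads"`
}

func collectDatabasesExample(ch chan<- prometheus.Metric, transactionsDesc, redoThreadsDesc *prometheus.Desc, is2019OrLater bool) error {
	base, err := pdh.NewCollector[exampleDatabaseValues]("SQLServer:Databases", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Databases collector: %w", err)
	}

	var rows []exampleDatabaseValues
	if err := base.Collect(&rows); err != nil {
		return fmt.Errorf("failed to collect Databases metrics: %w", err)
	}

	for _, row := range rows {
		ch <- prometheus.MustNewConstMetric(transactionsDesc, prometheus.CounterValue, row.TransactionsPerSec, row.Name)
	}

	// Counters that only exist on newer engine versions live in their own struct,
	// so querying them never touches older instances.
	if !is2019OrLater {
		return nil
	}

	v2019, err := pdh.NewCollector[exampleDatabaseValues2019]("SQLServer:Databases", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Databases 2019 collector: %w", err)
	}

	var rows2019 []exampleDatabaseValues2019
	if err := v2019.Collect(&rows2019); err != nil {
		return fmt.Errorf("failed to collect Databases 2019 metrics: %w", err)
	}

	for _, row := range rows2019 {
		ch <- prometheus.MustNewConstMetric(redoThreadsDesc, prometheus.GaugeValue, row.ActiveParallelRedoThreads, row.Name)
	}

	return nil
}
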
diff --git a/internal/collector/mssql/mssql_database_replica.go b/internal/collector/mssql/mssql_database_replica.go
index d89f50856..037a3430d 100644
--- a/internal/collector/mssql/mssql_database_replica.go
+++ b/internal/collector/mssql/mssql_database_replica.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorDatabaseReplica struct {
- dbReplicaPerfDataCollectors map[string]*perfdata.Collector
+ dbReplicaPerfDataCollectors map[string]*pdh.Collector
+ dbReplicaPerfDataObject []perfDataCounterValuesDBReplica
dbReplicaDatabaseFlowControlDelay *prometheus.Desc
dbReplicaDatabaseFlowControls *prometheus.Desc
@@ -53,67 +54,45 @@ type collectorDatabaseReplica struct {
dbReplicaTransactionDelay *prometheus.Desc
}
-const (
- dbReplicaDatabaseFlowControlDelay = "Database Flow Control Delay"
- dbReplicaDatabaseFlowControlsPerSec = "Database Flow Controls/sec"
- dbReplicaFileBytesReceivedPerSec = "File Bytes Received/sec"
- dbReplicaGroupCommitsPerSec = "Group Commits/Sec"
- dbReplicaGroupCommitTime = "Group Commit Time"
- dbReplicaLogApplyPendingQueue = "Log Apply Pending Queue"
- dbReplicaLogApplyReadyQueue = "Log Apply Ready Queue"
- dbReplicaLogBytesCompressedPerSec = "Log Bytes Compressed/sec"
- dbReplicaLogBytesDecompressedPerSec = "Log Bytes Decompressed/sec"
- dbReplicaLogBytesReceivedPerSec = "Log Bytes Received/sec"
- dbReplicaLogCompressionCacheHitsPerSec = "Log Compression Cache hits/sec"
- dbReplicaLogCompressionCacheMissesPerSec = "Log Compression Cache misses/sec"
- dbReplicaLogCompressionsPerSec = "Log Compressions/sec"
- dbReplicaLogDecompressionsPerSec = "Log Decompressions/sec"
- dbReplicaLogRemainingForUndo = "Log remaining for undo"
- dbReplicaLogSendQueue = "Log Send Queue"
- dbReplicaMirroredWriteTransactionsPerSec = "Mirrored Write Transactions/sec"
- dbReplicaRecoveryQueue = "Recovery Queue"
- dbReplicaRedoBlockedPerSec = "Redo blocked/sec"
- dbReplicaRedoBytesRemaining = "Redo Bytes Remaining"
- dbReplicaRedoneBytesPerSec = "Redone Bytes/sec"
- dbReplicaRedonesPerSec = "Redones/sec"
- dbReplicaTotalLogRequiringUndo = "Total Log requiring undo"
- dbReplicaTransactionDelay = "Transaction Delay"
-)
+type perfDataCounterValuesDBReplica struct {
+ Name string
+
+ DbReplicaDatabaseFlowControlDelay float64 `perfdata:"Database Flow Control Delay"`
+ DbReplicaDatabaseFlowControlsPerSec float64 `perfdata:"Database Flow Controls/sec"`
+ DbReplicaFileBytesReceivedPerSec float64 `perfdata:"File Bytes Received/sec"`
+ DbReplicaGroupCommitsPerSec float64 `perfdata:"Group Commits/Sec"`
+ DbReplicaGroupCommitTime float64 `perfdata:"Group Commit Time"`
+ DbReplicaLogApplyPendingQueue float64 `perfdata:"Log Apply Pending Queue"`
+ DbReplicaLogApplyReadyQueue float64 `perfdata:"Log Apply Ready Queue"`
+ DbReplicaLogBytesCompressedPerSec float64 `perfdata:"Log Bytes Compressed/sec"`
+ DbReplicaLogBytesDecompressedPerSec float64 `perfdata:"Log Bytes Decompressed/sec"`
+ DbReplicaLogBytesReceivedPerSec float64 `perfdata:"Log Bytes Received/sec"`
+ DbReplicaLogCompressionCacheHitsPerSec float64 `perfdata:"Log Compression Cache hits/sec"`
+ DbReplicaLogCompressionCacheMissesPerSec float64 `perfdata:"Log Compression Cache misses/sec"`
+ DbReplicaLogCompressionsPerSec float64 `perfdata:"Log Compressions/sec"`
+ DbReplicaLogDecompressionsPerSec float64 `perfdata:"Log Decompressions/sec"`
+ DbReplicaLogRemainingForUndo float64 `perfdata:"Log remaining for undo"`
+ DbReplicaLogSendQueue float64 `perfdata:"Log Send Queue"`
+ DbReplicaMirroredWriteTransactionsPerSec float64 `perfdata:"Mirrored Write Transactions/sec"`
+ DbReplicaRecoveryQueue float64 `perfdata:"Recovery Queue"`
+ DbReplicaRedoBlockedPerSec float64 `perfdata:"Redo blocked/sec"`
+ DbReplicaRedoBytesRemaining float64 `perfdata:"Redo Bytes Remaining"`
+ DbReplicaRedoneBytesPerSec float64 `perfdata:"Redone Bytes/sec"`
+ DbReplicaRedonesPerSec float64 `perfdata:"Redones/sec"`
+ DbReplicaTotalLogRequiringUndo float64 `perfdata:"Total Log requiring undo"`
+ DbReplicaTransactionDelay float64 `perfdata:"Transaction Delay"`
+}
func (c *Collector) buildDatabaseReplica() error {
var err error
- c.dbReplicaPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.dbReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- dbReplicaDatabaseFlowControlDelay,
- dbReplicaDatabaseFlowControlsPerSec,
- dbReplicaFileBytesReceivedPerSec,
- dbReplicaGroupCommitsPerSec,
- dbReplicaGroupCommitTime,
- dbReplicaLogApplyPendingQueue,
- dbReplicaLogApplyReadyQueue,
- dbReplicaLogBytesCompressedPerSec,
- dbReplicaLogBytesDecompressedPerSec,
- dbReplicaLogBytesReceivedPerSec,
- dbReplicaLogCompressionCacheHitsPerSec,
- dbReplicaLogCompressionCacheMissesPerSec,
- dbReplicaLogCompressionsPerSec,
- dbReplicaLogDecompressionsPerSec,
- dbReplicaLogRemainingForUndo,
- dbReplicaLogSendQueue,
- dbReplicaMirroredWriteTransactionsPerSec,
- dbReplicaRecoveryQueue,
- dbReplicaRedoBlockedPerSec,
- dbReplicaRedoBytesRemaining,
- dbReplicaRedoneBytesPerSec,
- dbReplicaRedonesPerSec,
- dbReplicaTotalLogRequiringUndo,
- dbReplicaTransactionDelay,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.dbReplicaPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Database Replica"), perfdata.InstancesAll, counters)
+ c.dbReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDBReplica](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "Database Replica"), pdh.InstancesAll,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance.name, err))
}
@@ -272,183 +251,179 @@ func (c *Collector) collectDatabaseReplica(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorDatabaseReplica, c.dbReplicaPerfDataCollectors, c.collectDatabaseReplicaInstance)
}
-func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.dbReplicaPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), err)
}
- for replicaName, data := range perfData {
+ for _, data := range c.dbReplicaPerfDataObject {
ch <- prometheus.MustNewConstMetric(
c.dbReplicaDatabaseFlowControlDelay,
prometheus.GaugeValue,
- data[dbReplicaDatabaseFlowControlDelay].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaDatabaseFlowControlDelay,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaDatabaseFlowControls,
prometheus.CounterValue,
- data[dbReplicaDatabaseFlowControlsPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaDatabaseFlowControlsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaFileBytesReceived,
prometheus.CounterValue,
- data[dbReplicaFileBytesReceivedPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaFileBytesReceivedPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaGroupCommits,
prometheus.CounterValue,
- data[dbReplicaGroupCommitsPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaGroupCommitsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaGroupCommitTime,
prometheus.GaugeValue,
- data[dbReplicaGroupCommitTime].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaGroupCommitTime,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogApplyPendingQueue,
prometheus.GaugeValue,
- data[dbReplicaLogApplyPendingQueue].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogApplyPendingQueue,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogApplyReadyQueue,
prometheus.GaugeValue,
- data[dbReplicaLogApplyReadyQueue].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogApplyReadyQueue,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesCompressed,
prometheus.CounterValue,
- data[dbReplicaLogBytesCompressedPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogBytesCompressedPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesDecompressed,
prometheus.CounterValue,
- data[dbReplicaLogBytesDecompressedPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogBytesDecompressedPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesReceived,
prometheus.CounterValue,
- data[dbReplicaLogBytesReceivedPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogBytesReceivedPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressionCachehits,
prometheus.CounterValue,
- data[dbReplicaLogCompressionCacheHitsPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogCompressionCacheHitsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressionCachemisses,
prometheus.CounterValue,
- data[dbReplicaLogCompressionCacheMissesPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogCompressionCacheMissesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressions,
prometheus.CounterValue,
- data[dbReplicaLogCompressionsPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogCompressionsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogDecompressions,
prometheus.CounterValue,
- data[dbReplicaLogDecompressionsPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogDecompressionsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogremainingforundo,
prometheus.GaugeValue,
- data[dbReplicaLogRemainingForUndo].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogRemainingForUndo,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogSendQueue,
prometheus.GaugeValue,
- data[dbReplicaLogSendQueue].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaLogSendQueue,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaMirroredWritetransactions,
prometheus.CounterValue,
- data[dbReplicaMirroredWriteTransactionsPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaMirroredWriteTransactionsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRecoveryQueue,
prometheus.GaugeValue,
- data[dbReplicaRecoveryQueue].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaRecoveryQueue,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoblocked,
prometheus.CounterValue,
- data[dbReplicaRedoBlockedPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaRedoBlockedPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoBytesRemaining,
prometheus.GaugeValue,
- data[dbReplicaRedoBytesRemaining].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaRedoBytesRemaining,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoneBytes,
prometheus.CounterValue,
- data[dbReplicaRedoneBytesPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaRedoneBytesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedones,
prometheus.CounterValue,
- data[dbReplicaRedonesPerSec].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaRedonesPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaTotalLogrequiringundo,
prometheus.GaugeValue,
- data[dbReplicaTotalLogRequiringUndo].FirstValue,
- sqlInstance, replicaName,
+ data.DbReplicaTotalLogRequiringUndo,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaTransactionDelay,
prometheus.GaugeValue,
- data[dbReplicaTransactionDelay].FirstValue/1000.0,
- sqlInstance, replicaName,
+ data.DbReplicaTransactionDelay/1000.0,
+ sqlInstance, data.Name,
)
}
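
The hunks above all follow the same migration: the per-counter string constants and the counters slice are replaced by a struct whose perfdata tags carry the counter names, perfdata.NewCollector(object, instances, counters) becomes the generic pdh.NewCollector[T](object, instances), and Collect now fills a typed slice instead of returning a map keyed by instance name. A minimal sketch of that shared shape, assuming the pdh API behaves as used in these hunks (the struct, function, and descriptor names below are illustrative and not part of the patch):

// Sketch only: tag-driven counter struct. The untagged Name field receives the
// perf-object instance name when the collector is built with pdh.InstancesAll.
type exampleCounterValues struct {
	Name string

	RequestsPerSec float64 `perfdata:"Requests/sec"`
}

// collectExample mirrors the collect*Instance functions above: Collect fills
// the slice with one element per perf-object instance, and each element is
// emitted as a metric labelled with the SQL instance and the object instance.
func collectExample(ch chan<- prometheus.Metric, desc *prometheus.Desc, sqlInstance string, col *pdh.Collector) error {
	var values []exampleCounterValues
	if err := col.Collect(&values); err != nil {
		return fmt.Errorf("failed to collect example metrics: %w", err)
	}

	for _, data := range values {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, data.RequestsPerSec, sqlInstance, data.Name)
	}

	return nil
}
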
diff --git a/internal/collector/mssql/mssql_general_statistics.go b/internal/collector/mssql/mssql_general_statistics.go
index 4a8248606..00ee2333e 100644
--- a/internal/collector/mssql/mssql_general_statistics.go
+++ b/internal/collector/mssql/mssql_general_statistics.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorGeneralStatistics struct {
- genStatsPerfDataCollectors map[string]*perfdata.Collector
+ genStatsPerfDataCollectors map[string]*pdh.Collector
+ genStatsPerfDataObject []perfDataCounterValuesGenStats
genStatsActiveTempTables *prometheus.Desc
genStatsConnectionReset *prometheus.Desc
@@ -53,67 +54,43 @@ type collectorGeneralStatistics struct {
genStatsUserConnections *prometheus.Desc
}
-const (
- genStatsActiveTempTables = "Active Temp Tables"
- genStatsConnectionResetPerSec = "Connection Reset/sec"
- genStatsEventNotificationsDelayedDrop = "Event Notifications Delayed Drop"
- genStatsHTTPAuthenticatedRequests = "HTTP Authenticated Requests"
- genStatsLogicalConnections = "Logical Connections"
- genStatsLoginsPerSec = "Logins/sec"
- genStatsLogoutsPerSec = "Logouts/sec"
- genStatsMarsDeadlocks = "Mars Deadlocks"
- genStatsNonatomicYieldRate = "Non-atomic yield rate"
- genStatsProcessesBlocked = "Processes blocked"
- genStatsSOAPEmptyRequests = "SOAP Empty Requests"
- genStatsSOAPMethodInvocations = "SOAP Method Invocations"
- genStatsSOAPSessionInitiateRequests = "SOAP Session Initiate Requests"
- genStatsSOAPSessionTerminateRequests = "SOAP Session Terminate Requests"
- genStatsSOAPSQLRequests = "SOAP SQL Requests"
- genStatsSOAPWSDLRequests = "SOAP WSDL Requests"
- genStatsSQLTraceIOProviderLockWaits = "SQL Trace IO Provider Lock Waits"
- genStatsTempdbRecoveryUnitID = "Tempdb recovery unit id"
- genStatsTempdbRowsetID = "Tempdb rowset id"
- genStatsTempTablesCreationRate = "Temp Tables Creation Rate"
- genStatsTempTablesForDestruction = "Temp Tables For Destruction"
- genStatsTraceEventNotificationQueue = "Trace Event Notification Queue"
- genStatsTransactions = "Transactions"
- genStatsUserConnections = "User Connections"
-)
+type perfDataCounterValuesGenStats struct {
+ GenStatsActiveTempTables float64 `perfdata:"Active Temp Tables"`
+ GenStatsConnectionResetPerSec float64 `perfdata:"Connection Reset/sec"`
+ GenStatsEventNotificationsDelayedDrop float64 `perfdata:"Event Notifications Delayed Drop"`
+ GenStatsHTTPAuthenticatedRequests float64 `perfdata:"HTTP Authenticated Requests"`
+ GenStatsLogicalConnections float64 `perfdata:"Logical Connections"`
+ GenStatsLoginsPerSec float64 `perfdata:"Logins/sec"`
+ GenStatsLogoutsPerSec float64 `perfdata:"Logouts/sec"`
+ GenStatsMarsDeadlocks float64 `perfdata:"Mars Deadlocks"`
+ GenStatsNonatomicYieldRate float64 `perfdata:"Non-atomic yield rate"`
+ GenStatsProcessesBlocked float64 `perfdata:"Processes blocked"`
+ GenStatsSOAPEmptyRequests float64 `perfdata:"SOAP Empty Requests"`
+ GenStatsSOAPMethodInvocations float64 `perfdata:"SOAP Method Invocations"`
+ GenStatsSOAPSessionInitiateRequests float64 `perfdata:"SOAP Session Initiate Requests"`
+ GenStatsSOAPSessionTerminateRequests float64 `perfdata:"SOAP Session Terminate Requests"`
+ GenStatsSOAPSQLRequests float64 `perfdata:"SOAP SQL Requests"`
+ GenStatsSOAPWSDLRequests float64 `perfdata:"SOAP WSDL Requests"`
+ GenStatsSQLTraceIOProviderLockWaits float64 `perfdata:"SQL Trace IO Provider Lock Waits"`
+ GenStatsTempdbRecoveryUnitID float64 `perfdata:"Tempdb recovery unit id"`
+ GenStatsTempdbRowsetID float64 `perfdata:"Tempdb rowset id"`
+ GenStatsTempTablesCreationRate float64 `perfdata:"Temp Tables Creation Rate"`
+ GenStatsTempTablesForDestruction float64 `perfdata:"Temp Tables For Destruction"`
+ GenStatsTraceEventNotificationQueue float64 `perfdata:"Trace Event Notification Queue"`
+ GenStatsTransactions float64 `perfdata:"Transactions"`
+ GenStatsUserConnections float64 `perfdata:"User Connections"`
+}
func (c *Collector) buildGeneralStatistics() error {
var err error
- c.genStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.genStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- genStatsActiveTempTables,
- genStatsConnectionResetPerSec,
- genStatsEventNotificationsDelayedDrop,
- genStatsHTTPAuthenticatedRequests,
- genStatsLogicalConnections,
- genStatsLoginsPerSec,
- genStatsLogoutsPerSec,
- genStatsMarsDeadlocks,
- genStatsNonatomicYieldRate,
- genStatsProcessesBlocked,
- genStatsSOAPEmptyRequests,
- genStatsSOAPMethodInvocations,
- genStatsSOAPSessionInitiateRequests,
- genStatsSOAPSessionTerminateRequests,
- genStatsSOAPSQLRequests,
- genStatsSOAPWSDLRequests,
- genStatsSQLTraceIOProviderLockWaits,
- genStatsTempdbRecoveryUnitID,
- genStatsTempdbRowsetID,
- genStatsTempTablesCreationRate,
- genStatsTempTablesForDestruction,
- genStatsTraceEventNotificationQueue,
- genStatsTransactions,
- genStatsUserConnections,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.genStatsPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "General Statistics"), nil, counters)
+ c.genStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesGenStats](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "General Statistics"), nil,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -272,186 +249,177 @@ func (c *Collector) collectGeneralStatistics(ch chan<- prometheus.Metric) error
return c.collect(ch, subCollectorGeneralStatistics, c.genStatsPerfDataCollectors, c.collectGeneralStatisticsInstance)
}
-func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.genStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"))
- }
-
ch <- prometheus.MustNewConstMetric(
c.genStatsActiveTempTables,
prometheus.GaugeValue,
- data[genStatsActiveTempTables].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsActiveTempTables,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsConnectionReset,
prometheus.CounterValue,
- data[genStatsConnectionResetPerSec].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsConnectionResetPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsEventNotificationsDelayedDrop,
prometheus.GaugeValue,
- data[genStatsEventNotificationsDelayedDrop].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsEventNotificationsDelayedDrop,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsHTTPAuthenticatedRequests,
prometheus.GaugeValue,
- data[genStatsHTTPAuthenticatedRequests].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsHTTPAuthenticatedRequests,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogicalConnections,
prometheus.GaugeValue,
- data[genStatsLogicalConnections].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsLogicalConnections,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogins,
prometheus.CounterValue,
- data[genStatsLoginsPerSec].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsLoginsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogouts,
prometheus.CounterValue,
- data[genStatsLogoutsPerSec].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsLogoutsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsMarsDeadlocks,
prometheus.GaugeValue,
- data[genStatsMarsDeadlocks].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsMarsDeadlocks,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsNonAtomicYieldRate,
prometheus.CounterValue,
- data[genStatsNonatomicYieldRate].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsNonatomicYieldRate,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsProcessesBlocked,
prometheus.GaugeValue,
- data[genStatsProcessesBlocked].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsProcessesBlocked,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPEmptyRequests,
prometheus.GaugeValue,
- data[genStatsSOAPEmptyRequests].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsSOAPEmptyRequests,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPMethodInvocations,
prometheus.GaugeValue,
- data[genStatsSOAPMethodInvocations].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsSOAPMethodInvocations,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSessionInitiateRequests,
prometheus.GaugeValue,
- data[genStatsSOAPSessionInitiateRequests].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsSOAPSessionInitiateRequests,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSessionTerminateRequests,
prometheus.GaugeValue,
- data[genStatsSOAPSessionTerminateRequests].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsSOAPSessionTerminateRequests,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSQLRequests,
prometheus.GaugeValue,
- data[genStatsSOAPSQLRequests].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsSOAPSQLRequests,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPWSDLRequests,
prometheus.GaugeValue,
- data[genStatsSOAPWSDLRequests].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsSOAPWSDLRequests,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSQLTraceIOProviderLockWaits,
prometheus.GaugeValue,
- data[genStatsSQLTraceIOProviderLockWaits].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsSQLTraceIOProviderLockWaits,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempDBRecoveryUnitID,
prometheus.GaugeValue,
- data[genStatsTempdbRecoveryUnitID].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsTempdbRecoveryUnitID,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempDBrowSetID,
prometheus.GaugeValue,
- data[genStatsTempdbRowsetID].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsTempdbRowsetID,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempTablesCreationRate,
prometheus.CounterValue,
- data[genStatsTempTablesCreationRate].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsTempTablesCreationRate,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempTablesForDestruction,
prometheus.GaugeValue,
- data[genStatsTempTablesForDestruction].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsTempTablesForDestruction,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTraceEventNotificationQueue,
prometheus.GaugeValue,
- data[genStatsTraceEventNotificationQueue].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsTraceEventNotificationQueue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTransactions,
prometheus.GaugeValue,
- data[genStatsTransactions].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsTransactions,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsUserConnections,
prometheus.GaugeValue,
- data[genStatsUserConnections].FirstValue,
+ c.genStatsPerfDataObject[0].GenStatsUserConnections,
sqlInstance,
)
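
General Statistics, like Memory Manager, SQL Statistics and Transactions below, is a single-instance performance object: the collector is built with nil instead of pdh.InstancesAll, the counter struct has no Name field, and the collect function reads element [0] of the slice that Collect fills. The removed perfdata.InstanceEmpty lookup used to guard against an empty result set; a guarded read in the new style could look like this sketch (hypothetical helper, not part of the patch):

// Sketch only: defensive access for single-instance objects, assuming Collect
// leaves the slice empty when the query returns no data.
func firstGenStats(values []perfDataCounterValuesGenStats, object string) (perfDataCounterValuesGenStats, error) {
	if len(values) == 0 {
		return perfDataCounterValuesGenStats{}, fmt.Errorf("query for %s returned an empty result set", object)
	}

	return values[0], nil
}
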
diff --git a/internal/collector/mssql/mssql_locks.go b/internal/collector/mssql/mssql_locks.go
index 503befd67..5199b77bd 100644
--- a/internal/collector/mssql/mssql_locks.go
+++ b/internal/collector/mssql/mssql_locks.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorLocks struct {
- locksPerfDataCollectors map[string]*perfdata.Collector
+ locksPerfDataCollectors map[string]*pdh.Collector
+ locksPerfDataObject []perfDataCounterValuesLocks
// Win32_PerfRawData_{instance}_SQLServerLocks
locksWaitTime *prometheus.Desc
@@ -38,35 +39,29 @@ type collectorLocks struct {
locksNumberOfDeadlocks *prometheus.Desc
}
-const (
- locksAverageWaitTimeMS = "Average Wait Time (ms)"
- locksAverageWaitTimeMSBase = "Average Wait Time Base"
- locksLockRequestsPerSec = "Lock Requests/sec"
- locksLockTimeoutsPerSec = "Lock Timeouts/sec"
- locksLockTimeoutsTimeout0PerSec = "Lock Timeouts (timeout > 0)/sec"
- locksLockWaitsPerSec = "Lock Waits/sec"
- locksLockWaitTimeMS = "Lock Wait Time (ms)"
- locksNumberOfDeadlocksPerSec = "Number of Deadlocks/sec"
-)
+type perfDataCounterValuesLocks struct {
+ Name string
+
+ LocksAverageWaitTimeMS float64 `perfdata:"Average Wait Time (ms)"`
+ LocksAverageWaitTimeMSBase float64 `perfdata:"Average Wait Time Base,secondvalue"`
+ LocksLockRequestsPerSec float64 `perfdata:"Lock Requests/sec"`
+ LocksLockTimeoutsPerSec float64 `perfdata:"Lock Timeouts/sec"`
+ LocksLockTimeoutsTimeout0PerSec float64 `perfdata:"Lock Timeouts (timeout > 0)/sec"`
+ LocksLockWaitsPerSec float64 `perfdata:"Lock Waits/sec"`
+ LocksLockWaitTimeMS float64 `perfdata:"Lock Wait Time (ms)"`
+ LocksNumberOfDeadlocksPerSec float64 `perfdata:"Number of Deadlocks/sec"`
+}
func (c *Collector) buildLocks() error {
var err error
- c.locksPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.locksPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- locksAverageWaitTimeMS,
- locksAverageWaitTimeMSBase,
- locksLockRequestsPerSec,
- locksLockTimeoutsPerSec,
- locksLockTimeoutsTimeout0PerSec,
- locksLockWaitsPerSec,
- locksLockWaitTimeMS,
- locksNumberOfDeadlocksPerSec,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.locksPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Locks"), perfdata.InstancesAll, counters)
+ c.locksPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesLocks](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "Locks"), pdh.InstancesAll,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance.name, err))
}
@@ -128,71 +123,67 @@ func (c *Collector) collectLocks(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorLocks, c.locksPerfDataCollectors, c.collectLocksInstance)
}
-func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.locksPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Locks"), err)
}
- for lockResourceName, data := range perfData {
+ for _, data := range c.locksPerfDataObject {
ch <- prometheus.MustNewConstMetric(
c.locksWaitTime,
prometheus.GaugeValue,
- data[locksAverageWaitTimeMS].FirstValue/1000.0,
- sqlInstance, lockResourceName,
+ data.LocksAverageWaitTimeMS/1000.0,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksCount,
prometheus.GaugeValue,
- data[locksAverageWaitTimeMSBase].SecondValue/1000.0,
- sqlInstance, lockResourceName,
+ data.LocksAverageWaitTimeMSBase/1000.0,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockRequests,
prometheus.CounterValue,
- data[locksLockRequestsPerSec].FirstValue,
- sqlInstance, lockResourceName,
+ data.LocksLockRequestsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockTimeouts,
prometheus.CounterValue,
- data[locksLockTimeoutsPerSec].FirstValue,
- sqlInstance, lockResourceName,
+ data.LocksLockTimeoutsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockTimeoutstimeout0,
prometheus.CounterValue,
- data[locksLockTimeoutsTimeout0PerSec].FirstValue,
- sqlInstance, lockResourceName,
+ data.LocksLockTimeoutsTimeout0PerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockWaits,
prometheus.CounterValue,
- data[locksLockWaitsPerSec].FirstValue,
- sqlInstance, lockResourceName,
+ data.LocksLockWaitsPerSec,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockWaitTimeMS,
prometheus.GaugeValue,
- data[locksLockWaitTimeMS].FirstValue/1000.0,
- sqlInstance, lockResourceName,
+ data.LocksLockWaitTimeMS/1000.0,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksNumberOfDeadlocks,
prometheus.CounterValue,
- data[locksNumberOfDeadlocksPerSec].FirstValue,
- sqlInstance, lockResourceName,
+ data.LocksNumberOfDeadlocksPerSec,
+ sqlInstance, data.Name,
)
}
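
Locks is the one object in this patch whose struct uses the ",secondvalue" tag option: the base counter is tagged "Average Wait Time Base,secondvalue", which appears to select the counter's second raw value, matching the data[locksAverageWaitTimeMSBase].SecondValue read in the removed code. A minimal tag example (illustrative only, not part of the patch):

// Sketch only: pairing a counter's first value with its base via the
// ",secondvalue" tag option, as the Locks struct above does.
type exampleWaitTime struct {
	Name string

	AverageWaitTimeMS     float64 `perfdata:"Average Wait Time (ms)"`
	AverageWaitTimeMSBase float64 `perfdata:"Average Wait Time Base,secondvalue"`
}
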
diff --git a/internal/collector/mssql/mssql_memory_manager.go b/internal/collector/mssql/mssql_memory_manager.go
index 9d397629c..508043ed3 100644
--- a/internal/collector/mssql/mssql_memory_manager.go
+++ b/internal/collector/mssql/mssql_memory_manager.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorMemoryManager struct {
- memMgrPerfDataCollectors map[string]*perfdata.Collector
+ memMgrPerfDataCollectors map[string]*pdh.Collector
+ memMgrPerfDataObject []perfDataCounterValuesMemMgr
memMgrConnectionMemoryKB *prometheus.Desc
memMgrDatabaseCacheMemoryKB *prometheus.Desc
@@ -49,59 +50,39 @@ type collectorMemoryManager struct {
memMgrTotalServerMemoryKB *prometheus.Desc
}
-const (
- memMgrConnectionMemoryKB = "Connection Memory (KB)"
- memMgrDatabaseCacheMemoryKB = "Database Cache Memory (KB)"
- memMgrExternalBenefitOfMemory = "External benefit of memory"
- memMgrFreeMemoryKB = "Free Memory (KB)"
- memMgrGrantedWorkspaceMemoryKB = "Granted Workspace Memory (KB)"
- memMgrLockBlocks = "Lock Blocks"
- memMgrLockBlocksAllocated = "Lock Blocks Allocated"
- memMgrLockMemoryKB = "Lock Memory (KB)"
- memMgrLockOwnerBlocks = "Lock Owner Blocks"
- memMgrLockOwnerBlocksAllocated = "Lock Owner Blocks Allocated"
- memMgrLogPoolMemoryKB = "Log Pool Memory (KB)"
- memMgrMaximumWorkspaceMemoryKB = "Maximum Workspace Memory (KB)"
- memMgrMemoryGrantsOutstanding = "Memory Grants Outstanding"
- memMgrMemoryGrantsPending = "Memory Grants Pending"
- memMgrOptimizerMemoryKB = "Optimizer Memory (KB)"
- memMgrReservedServerMemoryKB = "Reserved Server Memory (KB)"
- memMgrSQLCacheMemoryKB = "SQL Cache Memory (KB)"
- memMgrStolenServerMemoryKB = "Stolen Server Memory (KB)"
- memMgrTargetServerMemoryKB = "Target Server Memory (KB)"
- memMgrTotalServerMemoryKB = "Total Server Memory (KB)"
-)
+type perfDataCounterValuesMemMgr struct {
+ MemMgrConnectionMemoryKB float64 `perfdata:"Connection Memory (KB)"`
+ MemMgrDatabaseCacheMemoryKB float64 `perfdata:"Database Cache Memory (KB)"`
+ MemMgrExternalBenefitOfMemory float64 `perfdata:"External benefit of memory"`
+ MemMgrFreeMemoryKB float64 `perfdata:"Free Memory (KB)"`
+ MemMgrGrantedWorkspaceMemoryKB float64 `perfdata:"Granted Workspace Memory (KB)"`
+ MemMgrLockBlocks float64 `perfdata:"Lock Blocks"`
+ MemMgrLockBlocksAllocated float64 `perfdata:"Lock Blocks Allocated"`
+ MemMgrLockMemoryKB float64 `perfdata:"Lock Memory (KB)"`
+ MemMgrLockOwnerBlocks float64 `perfdata:"Lock Owner Blocks"`
+ MemMgrLockOwnerBlocksAllocated float64 `perfdata:"Lock Owner Blocks Allocated"`
+ MemMgrLogPoolMemoryKB float64 `perfdata:"Log Pool Memory (KB)"`
+ MemMgrMaximumWorkspaceMemoryKB float64 `perfdata:"Maximum Workspace Memory (KB)"`
+ MemMgrMemoryGrantsOutstanding float64 `perfdata:"Memory Grants Outstanding"`
+ MemMgrMemoryGrantsPending float64 `perfdata:"Memory Grants Pending"`
+ MemMgrOptimizerMemoryKB float64 `perfdata:"Optimizer Memory (KB)"`
+ MemMgrReservedServerMemoryKB float64 `perfdata:"Reserved Server Memory (KB)"`
+ MemMgrSQLCacheMemoryKB float64 `perfdata:"SQL Cache Memory (KB)"`
+ MemMgrStolenServerMemoryKB float64 `perfdata:"Stolen Server Memory (KB)"`
+ MemMgrTargetServerMemoryKB float64 `perfdata:"Target Server Memory (KB)"`
+ MemMgrTotalServerMemoryKB float64 `perfdata:"Total Server Memory (KB)"`
+}
func (c *Collector) buildMemoryManager() error {
var err error
- c.memMgrPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.memMgrPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- memMgrConnectionMemoryKB,
- memMgrDatabaseCacheMemoryKB,
- memMgrExternalBenefitOfMemory,
- memMgrFreeMemoryKB,
- memMgrGrantedWorkspaceMemoryKB,
- memMgrLockBlocks,
- memMgrLockBlocksAllocated,
- memMgrLockMemoryKB,
- memMgrLockOwnerBlocks,
- memMgrLockOwnerBlocksAllocated,
- memMgrLogPoolMemoryKB,
- memMgrMaximumWorkspaceMemoryKB,
- memMgrMemoryGrantsOutstanding,
- memMgrMemoryGrantsPending,
- memMgrOptimizerMemoryKB,
- memMgrReservedServerMemoryKB,
- memMgrSQLCacheMemoryKB,
- memMgrStolenServerMemoryKB,
- memMgrTargetServerMemoryKB,
- memMgrTotalServerMemoryKB,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.memMgrPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Memory Manager"), perfdata.InstancesAll, counters)
+ c.memMgrPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesMemMgr](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "Memory Manager"), pdh.InstancesAll,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Memory Manager collector for instance %s: %w", sqlInstance.name, err))
}
@@ -235,158 +216,149 @@ func (c *Collector) collectMemoryManager(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorMemoryManager, c.memMgrPerfDataCollectors, c.collectMemoryManagerInstance)
}
-func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.memMgrPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"))
- }
-
ch <- prometheus.MustNewConstMetric(
c.memMgrConnectionMemoryKB,
prometheus.GaugeValue,
- data[memMgrConnectionMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrConnectionMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrDatabaseCacheMemoryKB,
prometheus.GaugeValue,
- data[memMgrDatabaseCacheMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrDatabaseCacheMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrExternalBenefitOfMemory,
prometheus.GaugeValue,
- data[memMgrExternalBenefitOfMemory].FirstValue,
+ c.memMgrPerfDataObject[0].MemMgrExternalBenefitOfMemory,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrFreeMemoryKB,
prometheus.GaugeValue,
- data[memMgrFreeMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrFreeMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrGrantedWorkspaceMemoryKB,
prometheus.GaugeValue,
- data[memMgrGrantedWorkspaceMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrGrantedWorkspaceMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockBlocks,
prometheus.GaugeValue,
- data[memMgrLockBlocks].FirstValue,
+ c.memMgrPerfDataObject[0].MemMgrLockBlocks,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockBlocksAllocated,
prometheus.GaugeValue,
- data[memMgrLockBlocksAllocated].FirstValue,
+ c.memMgrPerfDataObject[0].MemMgrLockBlocksAllocated,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockMemoryKB,
prometheus.GaugeValue,
- data[memMgrLockMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrLockMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockOwnerBlocks,
prometheus.GaugeValue,
- data[memMgrLockOwnerBlocks].FirstValue,
+ c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocks,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockOwnerBlocksAllocated,
prometheus.GaugeValue,
- data[memMgrLockOwnerBlocksAllocated].FirstValue,
+ c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocksAllocated,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLogPoolMemoryKB,
prometheus.GaugeValue,
- data[memMgrLogPoolMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrLogPoolMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMaximumWorkspaceMemoryKB,
prometheus.GaugeValue,
- data[memMgrMaximumWorkspaceMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrMaximumWorkspaceMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMemoryGrantsOutstanding,
prometheus.GaugeValue,
- data[memMgrMemoryGrantsOutstanding].FirstValue,
+ c.memMgrPerfDataObject[0].MemMgrMemoryGrantsOutstanding,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMemoryGrantsPending,
prometheus.GaugeValue,
- data[memMgrMemoryGrantsPending].FirstValue,
+ c.memMgrPerfDataObject[0].MemMgrMemoryGrantsPending,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrOptimizerMemoryKB,
prometheus.GaugeValue,
- data[memMgrOptimizerMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrOptimizerMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrReservedServerMemoryKB,
prometheus.GaugeValue,
- data[memMgrReservedServerMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrReservedServerMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrSQLCacheMemoryKB,
prometheus.GaugeValue,
- data[memMgrSQLCacheMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrSQLCacheMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrStolenServerMemoryKB,
prometheus.GaugeValue,
- data[memMgrStolenServerMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrStolenServerMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrTargetServerMemoryKB,
prometheus.GaugeValue,
- data[memMgrTargetServerMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrTargetServerMemoryKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrTotalServerMemoryKB,
prometheus.GaugeValue,
- data[memMgrTotalServerMemoryKB].FirstValue*1024,
+ c.memMgrPerfDataObject[0].MemMgrTotalServerMemoryKB*1024,
sqlInstance,
)
diff --git a/internal/collector/mssql/mssql_sql_errors.go b/internal/collector/mssql/mssql_sql_errors.go
index 8f4392c51..e4033fa7d 100644
--- a/internal/collector/mssql/mssql_sql_errors.go
+++ b/internal/collector/mssql/mssql_sql_errors.go
@@ -19,33 +19,35 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorSQLErrors struct {
- sqlErrorsPerfDataCollectors map[string]*perfdata.Collector
+ sqlErrorsPerfDataCollectors map[string]*pdh.Collector
+ sqlErrorsPerfDataObject []perfDataCounterValuesSqlErrors
// Win32_PerfRawData_{instance}_SQLServerSQLErrors
sqlErrorsTotal *prometheus.Desc
}
-const (
- sqlErrorsErrorsPerSec = "Errors/sec"
-)
+type perfDataCounterValuesSqlErrors struct {
+ Name string
+
+ SqlErrorsErrorsPerSec float64 `perfdata:"Errors/sec"`
+}
func (c *Collector) buildSQLErrors() error {
var err error
- c.sqlErrorsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.sqlErrorsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- sqlErrorsErrorsPerSec,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.sqlErrorsPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Errors"), perfdata.InstancesAll, counters)
+ c.sqlErrorsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Errors"), pdh.InstancesAll,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance.name, err))
}
@@ -66,22 +68,18 @@ func (c *Collector) collectSQLErrors(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorSQLErrors, c.sqlErrorsPerfDataCollectors, c.collectSQLErrorsInstance)
}
-func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.sqlErrorsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), err)
}
- for resource, data := range perfData {
+ for _, data := range c.sqlErrorsPerfDataObject {
ch <- prometheus.MustNewConstMetric(
c.sqlErrorsTotal,
prometheus.CounterValue,
- data[sqlErrorsErrorsPerSec].FirstValue,
- sqlInstance, resource,
+ data.SqlErrorsErrorsPerSec,
+ sqlInstance, data.Name,
)
}
diff --git a/internal/collector/mssql/mssql_sql_stats.go b/internal/collector/mssql/mssql_sql_stats.go
index f68234952..453b9f74f 100644
--- a/internal/collector/mssql/mssql_sql_stats.go
+++ b/internal/collector/mssql/mssql_sql_stats.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorSQLStats struct {
- sqlStatsPerfDataCollectors map[string]*perfdata.Collector
+ sqlStatsPerfDataCollectors map[string]*pdh.Collector
+ sqlStatsPerfDataObject []perfDataCounterValuesSqlStats
sqlStatsAutoParamAttempts *prometheus.Desc
sqlStatsBatchRequests *prometheus.Desc
@@ -40,41 +41,30 @@ type collectorSQLStats struct {
sqlStatsUnsafeAutoParams *prometheus.Desc
}
-const (
- sqlStatsAutoParamAttemptsPerSec = "Auto-Param Attempts/sec"
- sqlStatsBatchRequestsPerSec = "Batch Requests/sec"
- sqlStatsFailedAutoParamsPerSec = "Failed Auto-Params/sec"
- sqlStatsForcedParameterizationsPerSec = "Forced Parameterizations/sec"
- sqlStatsGuidedplanexecutionsPerSec = "Guided plan executions/sec"
- sqlStatsMisguidedplanexecutionsPerSec = "Misguided plan executions/sec"
- sqlStatsSafeAutoParamsPerSec = "Safe Auto-Params/sec"
- sqlStatsSQLAttentionrate = "SQL Attention rate"
- sqlStatsSQLCompilationsPerSec = "SQL Compilations/sec"
- sqlStatsSQLReCompilationsPerSec = "SQL Re-Compilations/sec"
- sqlStatsUnsafeAutoParamsPerSec = "Unsafe Auto-Params/sec"
-)
+type perfDataCounterValuesSqlStats struct {
+ SqlStatsAutoParamAttemptsPerSec float64 `perfdata:"Auto-Param Attempts/sec"`
+ SqlStatsBatchRequestsPerSec float64 `perfdata:"Batch Requests/sec"`
+ SqlStatsFailedAutoParamsPerSec float64 `perfdata:"Failed Auto-Params/sec"`
+ SqlStatsForcedParameterizationsPerSec float64 `perfdata:"Forced Parameterizations/sec"`
+ SqlStatsGuidedplanexecutionsPerSec float64 `perfdata:"Guided plan executions/sec"`
+ SqlStatsMisguidedplanexecutionsPerSec float64 `perfdata:"Misguided plan executions/sec"`
+ SqlStatsSafeAutoParamsPerSec float64 `perfdata:"Safe Auto-Params/sec"`
+ SqlStatsSQLAttentionrate float64 `perfdata:"SQL Attention rate"`
+ SqlStatsSQLCompilationsPerSec float64 `perfdata:"SQL Compilations/sec"`
+ SqlStatsSQLReCompilationsPerSec float64 `perfdata:"SQL Re-Compilations/sec"`
+ SqlStatsUnsafeAutoParamsPerSec float64 `perfdata:"Unsafe Auto-Params/sec"`
+}
func (c *Collector) buildSQLStats() error {
var err error
- c.sqlStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.sqlStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- sqlStatsAutoParamAttemptsPerSec,
- sqlStatsBatchRequestsPerSec,
- sqlStatsFailedAutoParamsPerSec,
- sqlStatsForcedParameterizationsPerSec,
- sqlStatsGuidedplanexecutionsPerSec,
- sqlStatsMisguidedplanexecutionsPerSec,
- sqlStatsSafeAutoParamsPerSec,
- sqlStatsSQLAttentionrate,
- sqlStatsSQLCompilationsPerSec,
- sqlStatsSQLReCompilationsPerSec,
- sqlStatsUnsafeAutoParamsPerSec,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.sqlStatsPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Statistics"), nil, counters)
+ c.sqlStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlStats](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Statistics"), nil,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create SQL Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -154,95 +144,86 @@ func (c *Collector) collectSQLStats(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorSQLStats, c.sqlStatsPerfDataCollectors, c.collectSQLStatsInstance)
}
-func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.sqlStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"))
- }
-
ch <- prometheus.MustNewConstMetric(
c.sqlStatsAutoParamAttempts,
prometheus.CounterValue,
- data[sqlStatsAutoParamAttemptsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsAutoParamAttemptsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsBatchRequests,
prometheus.CounterValue,
- data[sqlStatsBatchRequestsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsBatchRequestsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsFailedAutoParams,
prometheus.CounterValue,
- data[sqlStatsFailedAutoParamsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsFailedAutoParamsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsForcedParameterizations,
prometheus.CounterValue,
- data[sqlStatsForcedParameterizationsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsForcedParameterizationsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsGuidedplanexecutions,
prometheus.CounterValue,
- data[sqlStatsGuidedplanexecutionsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsGuidedplanexecutionsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsMisguidedplanexecutions,
prometheus.CounterValue,
- data[sqlStatsMisguidedplanexecutionsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsMisguidedplanexecutionsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSafeAutoParams,
prometheus.CounterValue,
- data[sqlStatsSafeAutoParamsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsSafeAutoParamsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLAttentionrate,
prometheus.CounterValue,
- data[sqlStatsSQLAttentionrate].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsSQLAttentionrate,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLCompilations,
prometheus.CounterValue,
- data[sqlStatsSQLCompilationsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsSQLCompilationsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLReCompilations,
prometheus.CounterValue,
- data[sqlStatsSQLReCompilationsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsSQLReCompilationsPerSec,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsUnsafeAutoParams,
prometheus.CounterValue,
- data[sqlStatsUnsafeAutoParamsPerSec].FirstValue,
+ c.sqlStatsPerfDataObject[0].SqlStatsUnsafeAutoParamsPerSec,
sqlInstance,
)
diff --git a/internal/collector/mssql/mssql_transactions.go b/internal/collector/mssql/mssql_transactions.go
index fc8ecea7e..f76f50bce 100644
--- a/internal/collector/mssql/mssql_transactions.go
+++ b/internal/collector/mssql/mssql_transactions.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorTransactions struct {
- transactionsPerfDataCollectors map[string]*perfdata.Collector
+ transactionsPerfDataCollectors map[string]*pdh.Collector
+ transactionsPerfDataObject []perfDataCounterValuesTransactions
transactionsTempDbFreeSpaceBytes *prometheus.Desc
transactionsLongestTransactionRunningSeconds *prometheus.Desc
@@ -42,45 +43,32 @@ type collectorTransactions struct {
transactionsVersionStoreTruncationUnits *prometheus.Desc
}
-const (
- transactionsFreeSpaceintempdbKB = "Free Space in tempdb (KB)"
- transactionsLongestTransactionRunningTime = "Longest Transaction Running Time"
- transactionsNonSnapshotVersionTransactions = "NonSnapshot Version Transactions"
- transactionsSnapshotTransactions = "Snapshot Transactions"
- transactionsTransactions = "Transactions"
- transactionsUpdateconflictratio = "Update conflict ratio"
- transactionsUpdateSnapshotTransactions = "Update Snapshot Transactions"
- transactionsVersionCleanuprateKBPers = "Version Cleanup rate (KB/s)"
- transactionsVersionGenerationrateKBPers = "Version Generation rate (KB/s)"
- transactionsVersionStoreSizeKB = "Version Store Size (KB)"
- transactionsVersionStoreunitcount = "Version Store unit count"
- transactionsVersionStoreunitcreation = "Version Store unit creation"
- transactionsVersionStoreunittruncation = "Version Store unit truncation"
-)
+type perfDataCounterValuesTransactions struct {
+ TransactionsFreeSpaceintempdbKB float64 `perfdata:"Free Space in tempdb (KB)"`
+ TransactionsLongestTransactionRunningTime float64 `perfdata:"Longest Transaction Running Time"`
+ TransactionsNonSnapshotVersionTransactions float64 `perfdata:"NonSnapshot Version Transactions"`
+ TransactionsSnapshotTransactions float64 `perfdata:"Snapshot Transactions"`
+ TransactionsTransactions float64 `perfdata:"Transactions"`
+ TransactionsUpdateconflictratio float64 `perfdata:"Update conflict ratio"`
+ TransactionsUpdateSnapshotTransactions float64 `perfdata:"Update Snapshot Transactions"`
+ TransactionsVersionCleanuprateKBPers float64 `perfdata:"Version Cleanup rate (KB/s)"`
+ TransactionsVersionGenerationrateKBPers float64 `perfdata:"Version Generation rate (KB/s)"`
+ TransactionsVersionStoreSizeKB float64 `perfdata:"Version Store Size (KB)"`
+ TransactionsVersionStoreunitcount float64 `perfdata:"Version Store unit count"`
+ TransactionsVersionStoreunitcreation float64 `perfdata:"Version Store unit creation"`
+ TransactionsVersionStoreunittruncation float64 `perfdata:"Version Store unit truncation"`
+}
func (c *Collector) buildTransactions() error {
var err error
- c.transactionsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.transactionsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- transactionsFreeSpaceintempdbKB,
- transactionsLongestTransactionRunningTime,
- transactionsNonSnapshotVersionTransactions,
- transactionsSnapshotTransactions,
- transactionsTransactions,
- transactionsUpdateconflictratio,
- transactionsUpdateSnapshotTransactions,
- transactionsVersionCleanuprateKBPers,
- transactionsVersionGenerationrateKBPers,
- transactionsVersionStoreSizeKB,
- transactionsVersionStoreunitcount,
- transactionsVersionStoreunitcreation,
- transactionsVersionStoreunittruncation,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.transactionsPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Transactions"), nil, counters)
+ c.transactionsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesTransactions](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "Transactions"), nil,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Transactions collector for instance %s: %w", sqlInstance.name, err))
}
@@ -174,109 +162,100 @@ func (c *Collector) collectTransactions(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_MSSQLSERVER_Transactions docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
-func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.transactionsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"))
- }
-
ch <- prometheus.MustNewConstMetric(
c.transactionsTempDbFreeSpaceBytes,
prometheus.GaugeValue,
- data[transactionsFreeSpaceintempdbKB].FirstValue*1024,
+ c.transactionsPerfDataObject[0].TransactionsFreeSpaceintempdbKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsLongestTransactionRunningSeconds,
prometheus.GaugeValue,
- data[transactionsLongestTransactionRunningTime].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsLongestTransactionRunningTime,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsNonSnapshotVersionActiveTotal,
prometheus.CounterValue,
- data[transactionsNonSnapshotVersionTransactions].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsNonSnapshotVersionTransactions,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsSnapshotActiveTotal,
prometheus.CounterValue,
- data[transactionsSnapshotTransactions].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsSnapshotTransactions,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsActive,
prometheus.GaugeValue,
- data[transactionsTransactions].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsTransactions,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsUpdateConflictsTotal,
prometheus.CounterValue,
- data[transactionsUpdateconflictratio].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsUpdateconflictratio,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsUpdateSnapshotActiveTotal,
prometheus.CounterValue,
- data[transactionsUpdateSnapshotTransactions].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsUpdateSnapshotTransactions,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionCleanupRateBytes,
prometheus.GaugeValue,
- data[transactionsVersionCleanuprateKBPers].FirstValue*1024,
+ c.transactionsPerfDataObject[0].TransactionsVersionCleanuprateKBPers*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionGenerationRateBytes,
prometheus.GaugeValue,
- data[transactionsVersionGenerationrateKBPers].FirstValue*1024,
+ c.transactionsPerfDataObject[0].TransactionsVersionGenerationrateKBPers*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreSizeBytes,
prometheus.GaugeValue,
- data[transactionsVersionStoreSizeKB].FirstValue*1024,
+ c.transactionsPerfDataObject[0].TransactionsVersionStoreSizeKB*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreUnits,
prometheus.CounterValue,
- data[transactionsVersionStoreunitcount].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcount,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreCreationUnits,
prometheus.CounterValue,
- data[transactionsVersionStoreunitcreation].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcreation,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreTruncationUnits,
prometheus.CounterValue,
- data[transactionsVersionStoreunittruncation].FirstValue,
+ c.transactionsPerfDataObject[0].TransactionsVersionStoreunittruncation,
sqlInstance,
)
diff --git a/internal/collector/mssql/mssql_wait_stats.go b/internal/collector/mssql/mssql_wait_stats.go
index 720ac31f1..70c96caf7 100644
--- a/internal/collector/mssql/mssql_wait_stats.go
+++ b/internal/collector/mssql/mssql_wait_stats.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorWaitStats struct {
- waitStatsPerfDataCollectors map[string]*perfdata.Collector
+ waitStatsPerfDataCollectors map[string]*pdh.Collector
+ waitStatsPerfDataObject []perfDataCounterValuesWaitStats
waitStatsLockWaits *prometheus.Desc
waitStatsMemoryGrantQueueWaits *prometheus.Desc
@@ -41,43 +42,33 @@ type collectorWaitStats struct {
waitStatsTransactionOwnershipWaits *prometheus.Desc
}
-const (
- waitStatsLockWaits = "Lock waits"
- waitStatsMemoryGrantQueueWaits = "Memory grant queue waits"
- waitStatsThreadSafeMemoryObjectsWaits = "Thread-safe memory objects waits"
- waitStatsLogWriteWaits = "Log write waits"
- waitStatsLogBufferWaits = "Log buffer waits"
- waitStatsNetworkIOWaits = "Network IO waits"
- waitStatsPageIOLatchWaits = "Page IO latch waits"
- waitStatsPageLatchWaits = "Page latch waits"
- waitStatsNonpageLatchWaits = "Non-Page latch waits"
- waitStatsWaitForTheWorkerWaits = "Wait for the worker"
- waitStatsWorkspaceSynchronizationWaits = "Workspace synchronization waits"
- waitStatsTransactionOwnershipWaits = "Transaction ownership waits"
-)
+type perfDataCounterValuesWaitStats struct {
+ Name string
+
+ WaitStatsLockWaits float64 `perfdata:"Lock waits"`
+ WaitStatsMemoryGrantQueueWaits float64 `perfdata:"Memory grant queue waits"`
+ WaitStatsThreadSafeMemoryObjectsWaits float64 `perfdata:"Thread-safe memory objects waits"`
+ WaitStatsLogWriteWaits float64 `perfdata:"Log write waits"`
+ WaitStatsLogBufferWaits float64 `perfdata:"Log buffer waits"`
+ WaitStatsNetworkIOWaits float64 `perfdata:"Network IO waits"`
+ WaitStatsPageIOLatchWaits float64 `perfdata:"Page IO latch waits"`
+ WaitStatsPageLatchWaits float64 `perfdata:"Page latch waits"`
+ WaitStatsNonpageLatchWaits float64 `perfdata:"Non-Page latch waits"`
+ WaitStatsWaitForTheWorkerWaits float64 `perfdata:"Wait for the worker"`
+ WaitStatsWorkspaceSynchronizationWaits float64 `perfdata:"Workspace synchronization waits"`
+ WaitStatsTransactionOwnershipWaits float64 `perfdata:"Transaction ownership waits"`
+}
func (c *Collector) buildWaitStats() error {
var err error
- c.waitStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
+ c.waitStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
- counters := []string{
- waitStatsLockWaits,
- waitStatsMemoryGrantQueueWaits,
- waitStatsThreadSafeMemoryObjectsWaits,
- waitStatsLogWriteWaits,
- waitStatsLogBufferWaits,
- waitStatsNetworkIOWaits,
- waitStatsPageIOLatchWaits,
- waitStatsPageLatchWaits,
- waitStatsNonpageLatchWaits,
- waitStatsWaitForTheWorkerWaits,
- waitStatsWorkspaceSynchronizationWaits,
- waitStatsTransactionOwnershipWaits,
- }
for _, sqlInstance := range c.mssqlInstances {
- c.waitStatsPerfDataCollectors[sqlInstance.name], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance.name, "Wait Statistics"), perfdata.InstancesAll, counters)
+ c.waitStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesWaitStats](
+ c.mssqlGetPerfObjectName(sqlInstance.name, "Wait Statistics"), pdh.InstancesAll,
+ )
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -164,99 +155,95 @@ func (c *Collector) collectWaitStats(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorWaitStats, c.waitStatsPerfDataCollectors, c.collectWaitStatsInstance)
}
-func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
- if perfDataCollector == nil {
- return types.ErrCollectorNotInitialized
- }
-
- perfData, err := perfDataCollector.Collect()
+func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+ err := perfDataCollector.Collect(&c.waitStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), err)
}
- for item, data := range perfData {
+ for _, data := range c.waitStatsPerfDataObject {
ch <- prometheus.MustNewConstMetric(
c.waitStatsLockWaits,
prometheus.CounterValue,
- data[waitStatsLockWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsLockWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsMemoryGrantQueueWaits,
prometheus.CounterValue,
- data[waitStatsMemoryGrantQueueWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsMemoryGrantQueueWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsThreadSafeMemoryObjectsWaits,
prometheus.CounterValue,
- data[waitStatsThreadSafeMemoryObjectsWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsThreadSafeMemoryObjectsWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsLogWriteWaits,
prometheus.CounterValue,
- data[waitStatsLogWriteWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsLogWriteWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsLogBufferWaits,
prometheus.CounterValue,
- data[waitStatsLogBufferWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsLogBufferWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsNetworkIOWaits,
prometheus.CounterValue,
- data[waitStatsNetworkIOWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsNetworkIOWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsPageIOLatchWaits,
prometheus.CounterValue,
- data[waitStatsPageIOLatchWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsPageIOLatchWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsPageLatchWaits,
prometheus.CounterValue,
- data[waitStatsPageLatchWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsPageLatchWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsNonPageLatchWaits,
prometheus.CounterValue,
- data[waitStatsNonpageLatchWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsNonpageLatchWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsWaitForTheWorkerWaits,
prometheus.CounterValue,
- data[waitStatsWaitForTheWorkerWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsWaitForTheWorkerWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsWorkspaceSynchronizationWaits,
prometheus.CounterValue,
- data[waitStatsWorkspaceSynchronizationWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsWorkspaceSynchronizationWaits,
+ sqlInstance, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsTransactionOwnershipWaits,
prometheus.CounterValue,
- data[waitStatsTransactionOwnershipWaits].FirstValue,
- sqlInstance, item,
+ data.WaitStatsTransactionOwnershipWaits,
+ sqlInstance, data.Name,
)
}
diff --git a/internal/collector/net/const.go b/internal/collector/net/const.go
deleted file mode 100644
index 5df2983a0..000000000
--- a/internal/collector/net/const.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package net
-
-const (
- bytesReceivedPerSec = "Bytes Received/sec"
- bytesSentPerSec = "Bytes Sent/sec"
- bytesTotalPerSec = "Bytes Total/sec"
- currentBandwidth = "Current Bandwidth"
- outputQueueLength = "Output Queue Length"
- packetsOutboundDiscarded = "Packets Outbound Discarded"
- packetsOutboundErrors = "Packets Outbound Errors"
- packetsPerSec = "Packets/sec"
- packetsReceivedDiscarded = "Packets Received Discarded"
- packetsReceivedErrors = "Packets Received Errors"
- packetsReceivedPerSec = "Packets Received/sec"
- packetsReceivedUnknown = "Packets Received Unknown"
- packetsSentPerSec = "Packets Sent/sec"
-)
diff --git a/internal/collector/net/net.go b/internal/collector/net/net.go
index 8528db176..5fabbcf1f 100644
--- a/internal/collector/net/net.go
+++ b/internal/collector/net/net.go
@@ -27,7 +27,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
@@ -55,7 +55,8 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
bytesReceivedTotal *prometheus.Desc
bytesSentTotal *prometheus.Desc
@@ -158,23 +159,9 @@ func (c *Collector) Close() error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("Network Interface", perfdata.InstancesAll, []string{
- bytesReceivedPerSec,
- bytesSentPerSec,
- bytesTotalPerSec,
- outputQueueLength,
- packetsOutboundDiscarded,
- packetsOutboundErrors,
- packetsPerSec,
- packetsReceivedDiscarded,
- packetsReceivedErrors,
- packetsReceivedPerSec,
- packetsReceivedUnknown,
- packetsSentPerSec,
- currentBandwidth,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("Network Interface", pdh.InstancesAll)
if err != nil {
- return fmt.Errorf("failed to create Processor Information collector: %w", err)
+ return fmt.Errorf("failed to create Network Interface collector: %w", err)
}
if slices.Contains(c.config.CollectorsEnabled, "addresses") {
@@ -298,13 +285,13 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Network Information metrics: %w", err)
}
- for nicName, nicData := range data {
- if c.config.NicExclude.MatchString(nicName) || !c.config.NicInclude.MatchString(nicName) {
+ for _, data := range c.perfDataObject {
+ if c.config.NicExclude.MatchString(data.Name) || !c.config.NicInclude.MatchString(data.Name) {
continue
}
@@ -312,80 +299,80 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.bytesReceivedTotal,
prometheus.CounterValue,
- nicData[bytesReceivedPerSec].FirstValue,
- nicName,
+ data.BytesReceivedPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.bytesSentTotal,
prometheus.CounterValue,
- nicData[bytesSentPerSec].FirstValue,
- nicName,
+ data.BytesSentPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.bytesTotal,
prometheus.CounterValue,
- nicData[bytesTotalPerSec].FirstValue,
- nicName,
+ data.BytesTotalPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.outputQueueLength,
prometheus.GaugeValue,
- nicData[outputQueueLength].FirstValue,
- nicName,
+ data.OutputQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsOutboundDiscarded,
prometheus.CounterValue,
- nicData[packetsOutboundDiscarded].FirstValue,
- nicName,
+ data.PacketsOutboundDiscarded,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsOutboundErrors,
prometheus.CounterValue,
- nicData[packetsOutboundErrors].FirstValue,
- nicName,
+ data.PacketsOutboundErrors,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsTotal,
prometheus.CounterValue,
- nicData[packetsPerSec].FirstValue,
- nicName,
+ data.PacketsPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedDiscarded,
prometheus.CounterValue,
- nicData[packetsReceivedDiscarded].FirstValue,
- nicName,
+ data.PacketsReceivedDiscarded,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedErrors,
prometheus.CounterValue,
- nicData[packetsReceivedErrors].FirstValue,
- nicName,
+ data.PacketsReceivedErrors,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedTotal,
prometheus.CounterValue,
- nicData[packetsReceivedPerSec].FirstValue,
- nicName,
+ data.PacketsReceivedPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedUnknown,
prometheus.CounterValue,
- nicData[packetsReceivedUnknown].FirstValue,
- nicName,
+ data.PacketsReceivedUnknown,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsSentTotal,
prometheus.CounterValue,
- nicData[packetsSentPerSec].FirstValue,
- nicName,
+ data.PacketsSentPerSec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.currentBandwidth,
prometheus.GaugeValue,
- nicData[currentBandwidth].FirstValue/8,
- nicName,
+ data.CurrentBandwidth/8,
+ data.Name,
)
}
diff --git a/internal/collector/net/net_bench_test.go b/internal/collector/net/net_bench_test.go
index d5929f869..d087a2fb9 100644
--- a/internal/collector/net/net_bench_test.go
+++ b/internal/collector/net/net_bench_test.go
@@ -27,6 +27,7 @@ func BenchmarkCollector(b *testing.B) {
// PrinterInclude is not set in testing context (kingpin flags not parsed), causing the collector to skip all interfaces.
localNicInclude := ".+"
- kingpin.CommandLine.GetArg("collector.net.nic-include").StringVar(&localNicInclude)
- testutils.FuncBenchmarkCollector(b, net.Name, net.NewWithFlags)
+ testutils.FuncBenchmarkCollector(b, net.Name, net.NewWithFlags, func(app *kingpin.Application) {
+ app.GetFlag("collector.net.nic-include").StringVar(&localNicInclude)
+ })
}
diff --git a/internal/collector/net/types.go b/internal/collector/net/types.go
new file mode 100644
index 000000000..c06c2413a
--- /dev/null
+++ b/internal/collector/net/types.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package net
+
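+// perfDataCounterValues holds the "Network Interface" counters for a single NIC instance;
+// the perfdata struct tags name the PDH counters queried by pdh.NewCollector.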
+type perfDataCounterValues struct {
+ Name string
+
+ BytesReceivedPerSec float64 `perfdata:"Bytes Received/sec"`
+ BytesSentPerSec float64 `perfdata:"Bytes Sent/sec"`
+ BytesTotalPerSec float64 `perfdata:"Bytes Total/sec"`
+ CurrentBandwidth float64 `perfdata:"Current Bandwidth"`
+ OutputQueueLength float64 `perfdata:"Output Queue Length"`
+ PacketsOutboundDiscarded float64 `perfdata:"Packets Outbound Discarded"`
+ PacketsOutboundErrors float64 `perfdata:"Packets Outbound Errors"`
+ PacketsPerSec float64 `perfdata:"Packets/sec"`
+ PacketsReceivedDiscarded float64 `perfdata:"Packets Received Discarded"`
+ PacketsReceivedErrors float64 `perfdata:"Packets Received Errors"`
+ PacketsReceivedPerSec float64 `perfdata:"Packets Received/sec"`
+ PacketsReceivedUnknown float64 `perfdata:"Packets Received Unknown"`
+ PacketsSentPerSec float64 `perfdata:"Packets Sent/sec"`
+}
diff --git a/internal/collector/nps/const.go b/internal/collector/nps/const.go
deleted file mode 100644
index 3a1b0d5b7..000000000
--- a/internal/collector/nps/const.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package nps
-
-const (
- // NPS Authentication Server
- accessAccepts = "Access-Accepts"
- accessChallenges = "Access-Challenges"
- accessRejects = "Access-Rejects"
- accessRequests = "Access-Requests"
- accessBadAuthenticators = "Bad Authenticators"
- accessDroppedPackets = "Dropped Packets"
- accessInvalidRequests = "Invalid Requests"
- accessMalformedPackets = "Malformed Packets"
- accessPacketsReceived = "Packets Received"
- accessPacketsSent = "Packets Sent"
- accessServerResetTime = "Server Reset Time"
- accessServerUpTime = "Server Up Time"
- accessUnknownType = "Unknown Type"
-
- // NPS Accounting Server
- accountingRequests = "Accounting-Requests"
- accountingResponses = "Accounting-Responses"
- accountingBadAuthenticators = "Bad Authenticators"
- accountingDroppedPackets = "Dropped Packets"
- accountingInvalidRequests = "Invalid Requests"
- accountingMalformedPackets = "Malformed Packets"
- accountingNoRecord = "No Record"
- accountingPacketsReceived = "Packets Received"
- accountingPacketsSent = "Packets Sent"
- accountingServerResetTime = "Server Reset Time"
- accountingServerUpTime = "Server Up Time"
- accountingUnknownType = "Unknown Type"
-)
diff --git a/internal/collector/nps/nps.go b/internal/collector/nps/nps.go
index aa4e37a48..2a8bf163a 100644
--- a/internal/collector/nps/nps.go
+++ b/internal/collector/nps/nps.go
@@ -22,7 +22,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -37,7 +37,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- accessPerfDataCollector *perfdata.Collector
+ accessPerfDataCollector *pdh.Collector
+ accessPerfDataObject []perfDataCounterValuesAccess
accessAccepts *prometheus.Desc
accessChallenges *prometheus.Desc
accessRejects *prometheus.Desc
@@ -52,7 +53,8 @@ type Collector struct {
accessServerUpTime *prometheus.Desc
accessUnknownType *prometheus.Desc
- accountingPerfDataCollector *perfdata.Collector
+ accountingPerfDataCollector *pdh.Collector
+ accountingPerfDataObject []perfDataCounterValuesAccounting
accountingRequests *prometheus.Desc
accountingResponses *prometheus.Desc
accountingBadAuthenticators *prometheus.Desc
@@ -96,39 +98,12 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
errs := make([]error, 0, 2)
- c.accessPerfDataCollector, err = perfdata.NewCollector("NPS Authentication Server", nil, []string{
- accessAccepts,
- accessChallenges,
- accessRejects,
- accessRequests,
- accessBadAuthenticators,
- accessDroppedPackets,
- accessInvalidRequests,
- accessMalformedPackets,
- accessPacketsReceived,
- accessPacketsSent,
- accessServerResetTime,
- accessServerUpTime,
- accessUnknownType,
- })
+ c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess]("NPS Authentication Server", nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
}
- c.accountingPerfDataCollector, err = perfdata.NewCollector("NPS Accounting Server", nil, []string{
- accountingRequests,
- accountingResponses,
- accountingBadAuthenticators,
- accountingDroppedPackets,
- accountingInvalidRequests,
- accountingMalformedPackets,
- accountingNoRecord,
- accountingPacketsReceived,
- accountingPacketsSent,
- accountingServerResetTime,
- accountingServerUpTime,
- accountingUnknownType,
- })
+ c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting]("NPS Accounting Server", nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
}
@@ -307,178 +282,168 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
// collectAccept sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) collectAccept(ch chan<- prometheus.Metric) error {
- perfData, err := c.accessPerfDataCollector.Collect()
+ err := c.accessPerfDataCollector.Collect(&c.accessPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect NPS Authentication Server metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("failed to collect NPS Authentication Server metrics: %w", types.ErrNoData)
- }
-
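+ // The NPS Authentication Server object exposes a single unnamed instance, so element 0 carries all counters.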
ch <- prometheus.MustNewConstMetric(
c.accessAccepts,
prometheus.CounterValue,
- data[accessAccepts].FirstValue,
+ c.accessPerfDataObject[0].AccessAccepts,
)
ch <- prometheus.MustNewConstMetric(
c.accessChallenges,
prometheus.CounterValue,
- data[accessChallenges].FirstValue,
+ c.accessPerfDataObject[0].AccessChallenges,
)
ch <- prometheus.MustNewConstMetric(
c.accessRejects,
prometheus.CounterValue,
- data[accessRejects].FirstValue,
+ c.accessPerfDataObject[0].AccessRejects,
)
ch <- prometheus.MustNewConstMetric(
c.accessRequests,
prometheus.CounterValue,
- data[accessRequests].FirstValue,
+ c.accessPerfDataObject[0].AccessRequests,
)
ch <- prometheus.MustNewConstMetric(
c.accessBadAuthenticators,
prometheus.CounterValue,
- data[accessBadAuthenticators].FirstValue,
+ c.accessPerfDataObject[0].AccessBadAuthenticators,
)
ch <- prometheus.MustNewConstMetric(
c.accessDroppedPackets,
prometheus.CounterValue,
- data[accessDroppedPackets].FirstValue,
+ c.accessPerfDataObject[0].AccessDroppedPackets,
)
ch <- prometheus.MustNewConstMetric(
c.accessInvalidRequests,
prometheus.CounterValue,
- data[accessInvalidRequests].FirstValue,
+ c.accessPerfDataObject[0].AccessInvalidRequests,
)
ch <- prometheus.MustNewConstMetric(
c.accessMalformedPackets,
prometheus.CounterValue,
- data[accessMalformedPackets].FirstValue,
+ c.accessPerfDataObject[0].AccessMalformedPackets,
)
ch <- prometheus.MustNewConstMetric(
c.accessPacketsReceived,
prometheus.CounterValue,
- data[accessPacketsReceived].FirstValue,
+ c.accessPerfDataObject[0].AccessPacketsReceived,
)
ch <- prometheus.MustNewConstMetric(
c.accessPacketsSent,
prometheus.CounterValue,
- data[accessPacketsSent].FirstValue,
+ c.accessPerfDataObject[0].AccessPacketsSent,
)
ch <- prometheus.MustNewConstMetric(
c.accessServerResetTime,
prometheus.CounterValue,
- data[accessServerResetTime].FirstValue,
+ c.accessPerfDataObject[0].AccessServerResetTime,
)
ch <- prometheus.MustNewConstMetric(
c.accessServerUpTime,
prometheus.CounterValue,
- data[accessServerUpTime].FirstValue,
+ c.accessPerfDataObject[0].AccessServerUpTime,
)
ch <- prometheus.MustNewConstMetric(
c.accessUnknownType,
prometheus.CounterValue,
- data[accessUnknownType].FirstValue,
+ c.accessPerfDataObject[0].AccessUnknownType,
)
return nil
}
func (c *Collector) collectAccounting(ch chan<- prometheus.Metric) error {
- perfData, err := c.accountingPerfDataCollector.Collect()
+ err := c.accountingPerfDataCollector.Collect(&c.accountingPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect NPS Accounting Server metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("failed to collect NPS Accounting Server metrics: %w", types.ErrNoData)
- }
-
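+ // As above, the NPS Accounting Server object exposes a single unnamed instance.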
ch <- prometheus.MustNewConstMetric(
c.accountingRequests,
prometheus.CounterValue,
- data[accountingRequests].FirstValue,
+ c.accountingPerfDataObject[0].AccountingRequests,
)
ch <- prometheus.MustNewConstMetric(
c.accountingResponses,
prometheus.CounterValue,
- data[accountingResponses].FirstValue,
+ c.accountingPerfDataObject[0].AccountingResponses,
)
ch <- prometheus.MustNewConstMetric(
c.accountingBadAuthenticators,
prometheus.CounterValue,
- data[accountingBadAuthenticators].FirstValue,
+ c.accountingPerfDataObject[0].AccountingBadAuthenticators,
)
ch <- prometheus.MustNewConstMetric(
c.accountingDroppedPackets,
prometheus.CounterValue,
- data[accountingDroppedPackets].FirstValue,
+ c.accountingPerfDataObject[0].AccountingDroppedPackets,
)
ch <- prometheus.MustNewConstMetric(
c.accountingInvalidRequests,
prometheus.CounterValue,
- data[accountingInvalidRequests].FirstValue,
+ c.accountingPerfDataObject[0].AccountingInvalidRequests,
)
ch <- prometheus.MustNewConstMetric(
c.accountingMalformedPackets,
prometheus.CounterValue,
- data[accountingMalformedPackets].FirstValue,
+ c.accountingPerfDataObject[0].AccountingMalformedPackets,
)
ch <- prometheus.MustNewConstMetric(
c.accountingNoRecord,
prometheus.CounterValue,
- data[accountingNoRecord].FirstValue,
+ c.accountingPerfDataObject[0].AccountingNoRecord,
)
ch <- prometheus.MustNewConstMetric(
c.accountingPacketsReceived,
prometheus.CounterValue,
- data[accountingPacketsReceived].FirstValue,
+ c.accountingPerfDataObject[0].AccountingPacketsReceived,
)
ch <- prometheus.MustNewConstMetric(
c.accountingPacketsSent,
prometheus.CounterValue,
- data[accountingPacketsSent].FirstValue,
+ c.accountingPerfDataObject[0].AccountingPacketsSent,
)
ch <- prometheus.MustNewConstMetric(
c.accountingServerResetTime,
prometheus.CounterValue,
- data[accountingServerResetTime].FirstValue,
+ c.accountingPerfDataObject[0].AccountingServerResetTime,
)
ch <- prometheus.MustNewConstMetric(
c.accountingServerUpTime,
prometheus.CounterValue,
- data[accountingServerUpTime].FirstValue,
+ c.accountingPerfDataObject[0].AccountingServerUpTime,
)
ch <- prometheus.MustNewConstMetric(
c.accountingUnknownType,
prometheus.CounterValue,
- data[accountingUnknownType].FirstValue,
+ c.accountingPerfDataObject[0].AccountingUnknownType,
)
return nil
diff --git a/internal/collector/nps/types.go b/internal/collector/nps/types.go
new file mode 100644
index 000000000..454f7e37a
--- /dev/null
+++ b/internal/collector/nps/types.go
@@ -0,0 +1,49 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package nps
+
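+// perfDataCounterValuesAccess mirrors the single-instance "NPS Authentication Server" object;
+// each field's perfdata tag names the counter it is populated from.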
+type perfDataCounterValuesAccess struct {
+ // NPS Authentication Server
+ AccessAccepts float64 `perfdata:"Access-Accepts"`
+ AccessChallenges float64 `perfdata:"Access-Challenges"`
+ AccessRejects float64 `perfdata:"Access-Rejects"`
+ AccessRequests float64 `perfdata:"Access-Requests"`
+ AccessBadAuthenticators float64 `perfdata:"Bad Authenticators"`
+ AccessDroppedPackets float64 `perfdata:"Dropped Packets"`
+ AccessInvalidRequests float64 `perfdata:"Invalid Requests"`
+ AccessMalformedPackets float64 `perfdata:"Malformed Packets"`
+ AccessPacketsReceived float64 `perfdata:"Packets Received"`
+ AccessPacketsSent float64 `perfdata:"Packets Sent"`
+ AccessServerResetTime float64 `perfdata:"Server Reset Time"`
+ AccessServerUpTime float64 `perfdata:"Server Up Time"`
+ AccessUnknownType float64 `perfdata:"Unknown Type"`
+}
+
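+// perfDataCounterValuesAccounting mirrors the single-instance "NPS Accounting Server" object.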
+type perfDataCounterValuesAccounting struct {
+ // NPS Accounting Server
+ AccountingRequests float64 `perfdata:"Accounting-Requests"`
+ AccountingResponses float64 `perfdata:"Accounting-Responses"`
+ AccountingBadAuthenticators float64 `perfdata:"Bad Authenticators"`
+ AccountingDroppedPackets float64 `perfdata:"Dropped Packets"`
+ AccountingInvalidRequests float64 `perfdata:"Invalid Requests"`
+ AccountingMalformedPackets float64 `perfdata:"Malformed Packets"`
+ AccountingNoRecord float64 `perfdata:"No Record"`
+ AccountingPacketsReceived float64 `perfdata:"Packets Received"`
+ AccountingPacketsSent float64 `perfdata:"Packets Sent"`
+ AccountingServerResetTime float64 `perfdata:"Server Reset Time"`
+ AccountingServerUpTime float64 `perfdata:"Server Up Time"`
+ AccountingUnknownType float64 `perfdata:"Unknown Type"`
+}
diff --git a/internal/collector/pagefile/pagefile.go b/internal/collector/pagefile/pagefile.go
index d6a556018..5ce2b3e1d 100644
--- a/internal/collector/pagefile/pagefile.go
+++ b/internal/collector/pagefile/pagefile.go
@@ -24,7 +24,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/psapi"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -40,7 +40,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
pagingFreeBytes *prometheus.Desc
pagingLimitBytes *prometheus.Desc
@@ -75,9 +76,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("Paging File", perfdata.InstancesAll, []string{
- usage,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("Paging File", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Paging File collector: %w", err)
}
@@ -102,7 +101,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Paging File metrics: %w", err)
}
@@ -112,8 +111,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
return err
}
- for fileName, pageFile := range data {
- fileString := strings.ReplaceAll(fileName, `\??\`, "")
+ for _, data := range c.perfDataObject {
+ fileString := strings.ReplaceAll(data.Name, `\??\`, "")
file, err := os.Stat(fileString)
var fileSize float64
@@ -126,7 +125,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.pagingFreeBytes,
prometheus.GaugeValue,
- fileSize-(pageFile[usage].FirstValue*float64(gpi.PageSize)),
+ fileSize-(data.Usage*float64(gpi.PageSize)),
fileString,
)
diff --git a/internal/collector/pagefile/const.go b/internal/collector/pagefile/types.go
similarity index 87%
rename from internal/collector/pagefile/const.go
rename to internal/collector/pagefile/types.go
index 65a145aeb..c3f7e5b49 100644
--- a/internal/collector/pagefile/const.go
+++ b/internal/collector/pagefile/types.go
@@ -15,6 +15,8 @@
package pagefile
-const (
- usage = "% Usage"
-)
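+// perfDataCounterValues exposes the "Paging File" object's "% Usage" counter per page file;
+// Name holds the page-file path as reported by PDH.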
+type perfDataCounterValues struct {
+ Name string
+
+ Usage float64 `perfdata:"% Usage"`
+}
diff --git a/internal/collector/performancecounter/performancecounter.go b/internal/collector/performancecounter/performancecounter.go
index 99452a07b..1924abc8d 100644
--- a/internal/collector/performancecounter/performancecounter.go
+++ b/internal/collector/performancecounter/performancecounter.go
@@ -19,13 +19,14 @@ import (
"errors"
"fmt"
"log/slog"
+ "reflect"
"slices"
"strings"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/yaml.v3"
@@ -91,7 +92,7 @@ func NewWithFlags(app *kingpin.Application) *Collector {
}
if err := yaml.Unmarshal([]byte(objects), &c.config.Objects); err != nil {
- return fmt.Errorf("failed to parse objects: %w", err)
+ return fmt.Errorf("failed to parse objects %s: %w", objects, err)
}
return nil
@@ -147,6 +148,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
names = append(names, object.Name)
counters := make([]string, 0, len(object.Counters))
+ fields := make([]reflect.StructField, 0, len(object.Counters)+2)
for j, counter := range object.Counters {
if counter.Metric == "" {
@@ -169,9 +171,28 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
}
counters = append(counters, counter.Name)
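+ // Build one struct field per configured counter so the reflection-based pdh collector
+ // can bind collected values by the counter's perfdata tag.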
+ fields = append(fields, reflect.StructField{
+ Name: strings.ToUpper(c.sanitizeMetricName(counter.Name)),
+ Type: reflect.TypeOf(float64(0)),
+ Tag: reflect.StructTag(fmt.Sprintf(`perfdata:"%s"`, counter.Name)),
+ })
}
- collector, err := perfdata.NewCollector(object.Object, object.Instances, counters)
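+ // Objects queried with explicit instances also get a Name field for the instance label;
+ // MetricType records the counter's native value type, used when no type is configured.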
+ if object.Instances != nil {
+ fields = append(fields, reflect.StructField{
+ Name: "Name",
+ Type: reflect.TypeOf(""),
+ })
+ }
+
+ fields = append(fields, reflect.StructField{
+ Name: "MetricType",
+ Type: reflect.TypeOf(prometheus.ValueType(0)),
+ })
+
+ valueType := reflect.StructOf(fields)
+
+ collector, err := pdh.NewCollectorWithReflection(object.Object, object.Instances, valueType)
if err != nil {
errs = append(errs, fmt.Errorf("failed collector for %s: %w", object.Name, err))
}
@@ -181,6 +202,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
}
object.collector = collector
+ object.perfDataObject = reflect.New(reflect.SliceOf(valueType)).Interface()
c.objects = append(c.objects, object)
}
@@ -242,41 +264,79 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectObject(ch chan<- prometheus.Metric, perfDataObject Object) error {
- collectedPerfData, err := perfDataObject.collector.Collect()
+ err := perfDataObject.collector.Collect(perfDataObject.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect data: %w", err)
}
var errs []error
- for collectedInstance, collectedInstanceCounters := range collectedPerfData {
+ sliceValue := reflect.ValueOf(perfDataObject.perfDataObject).Elem().Interface()
+ for i := range reflect.ValueOf(sliceValue).Len() {
for _, counter := range perfDataObject.Counters {
- collectedCounterValue, ok := collectedInstanceCounters[counter.Name]
- if !ok {
- errs = append(errs, fmt.Errorf("counter %s not found in collected data", counter.Name))
+ val := reflect.ValueOf(sliceValue).Index(i)
+
+ field := val.FieldByName(strings.ToUpper(c.sanitizeMetricName(counter.Name)))
+ if !field.IsValid() {
+ errs = append(errs, fmt.Errorf("%s not found in collected data", counter.Name))
+
+ continue
+ }
+
+ if field.Kind() != reflect.Float64 {
+ errs = append(errs, fmt.Errorf("failed to cast %s to float64", counter.Name))
+
+ continue
+ }
+
+ collectedCounterValue := field.Float()
+
+ field = val.FieldByName("MetricType")
+ if !field.IsValid() {
+ errs = append(errs, errors.New("field MetricType not found in collected data"))
continue
}
+ if field.Kind() != reflect.TypeOf(prometheus.ValueType(0)).Kind() {
+ errs = append(errs, fmt.Errorf("failed to cast MetricType for %s to prometheus.ValueType", counter.Name))
+
+ continue
+ }
+
+ metricType, _ := field.Interface().(prometheus.ValueType)
+
labels := make(prometheus.Labels, len(counter.Labels)+1)
- if collectedInstance != perfdata.InstanceEmpty {
- labels[perfDataObject.InstanceLabel] = collectedInstance
+ if perfDataObject.Instances != nil {
+ field := val.FieldByName("Name")
+ if !field.IsValid() {
+ errs = append(errs, errors.New("field Name not found in collected data"))
+
+ continue
+ }
+
+ if field.Kind() != reflect.String {
+ errs = append(errs, fmt.Errorf("failed to cast Name for %s to string", counter.Name))
+
+ continue
+ }
+
+ collectedInstance := field.String()
+ if collectedInstance != pdh.InstanceEmpty {
+ labels[perfDataObject.InstanceLabel] = collectedInstance
+ }
}
for key, value := range counter.Labels {
labels[key] = value
}
- var metricType prometheus.ValueType
-
switch counter.Type {
case "counter":
metricType = prometheus.CounterValue
case "gauge":
metricType = prometheus.GaugeValue
- default:
- metricType = collectedCounterValue.Type
}
ch <- prometheus.MustNewConstMetric(
@@ -287,21 +347,8 @@ func (c *Collector) collectObject(ch chan<- prometheus.Metric, perfDataObject Ob
labels,
),
metricType,
- collectedCounterValue.FirstValue,
+ collectedCounterValue,
)
-
- if collectedCounterValue.SecondValue != 0 {
- ch <- prometheus.MustNewConstMetric(
- prometheus.NewDesc(
- counter.Metric+"_second",
- "windows_exporter: custom Performance Counter metric",
- nil,
- labels,
- ),
- metricType,
- collectedCounterValue.SecondValue,
- )
- }
}
}
diff --git a/internal/collector/performancecounter/performancecounter_test.go b/internal/collector/performancecounter/performancecounter_test.go
index d9a74698a..e9752d757 100644
--- a/internal/collector/performancecounter/performancecounter_test.go
+++ b/internal/collector/performancecounter/performancecounter_test.go
@@ -25,7 +25,8 @@ import (
func BenchmarkCollector(b *testing.B) {
perfDataObjects := `[{"object":"Processor Information","instances":["*"],"instance_label":"core","counters":[{"name":"% Processor Time","metric":"windows_performancecounter_processor_information_processor_time","labels":{"state":"active"}},{"name":"% Idle Time","metric":"windows_performancecounter_processor_information_processor_time","labels":{"state":"idle"}}]},{"object":"Memory","counters":[{"name":"Cache Faults/sec","type":"counter"}]}]`
- kingpin.CommandLine.GetArg("collector.perfdata.objects").StringVar(&perfDataObjects)
- testutils.FuncBenchmarkCollector(b, performancecounter.Name, performancecounter.NewWithFlags)
+ testutils.FuncBenchmarkCollector(b, performancecounter.Name, performancecounter.NewWithFlags, func(app *kingpin.Application) {
+ app.GetFlag("collector.perfdata.objects").StringVar(&perfDataObjects)
+ })
}
diff --git a/internal/collector/performancecounter/types.go b/internal/collector/performancecounter/types.go
index a01d56918..2034f01a0 100644
--- a/internal/collector/performancecounter/types.go
+++ b/internal/collector/performancecounter/types.go
@@ -15,7 +15,9 @@
package performancecounter
-import "github.com/prometheus-community/windows_exporter/internal/perfdata"
+import (
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
+)
type Object struct {
Name string `json:"name" yaml:"name"`
@@ -24,7 +26,8 @@ type Object struct {
Counters []Counter `json:"counters" yaml:"counters"`
InstanceLabel string `json:"instance_label" yaml:"instance_label"` //nolint:tagliatelle
- collector *perfdata.Collector
+ collector *pdh.Collector
+ perfDataObject any
}
type Counter struct {
diff --git a/internal/collector/physical_disk/const.go b/internal/collector/physical_disk/const.go
deleted file mode 100644
index 49ddfb3cc..000000000
--- a/internal/collector/physical_disk/const.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package physical_disk
-
-const (
- CurrentDiskQueueLength = "Current Disk Queue Length"
- DiskReadBytesPerSec = "Disk Read Bytes/sec"
- DiskReadsPerSec = "Disk Reads/sec"
- DiskWriteBytesPerSec = "Disk Write Bytes/sec"
- DiskWritesPerSec = "Disk Writes/sec"
- PercentDiskReadTime = "% Disk Read Time"
- PercentDiskWriteTime = "% Disk Write Time"
- PercentIdleTime = "% Idle Time"
- SplitIOPerSec = "Split IO/Sec"
- AvgDiskSecPerRead = "Avg. Disk sec/Read"
- AvgDiskSecPerWrite = "Avg. Disk sec/Write"
- AvgDiskSecPerTransfer = "Avg. Disk sec/Transfer"
-)
diff --git a/internal/collector/physical_disk/physical_disk.go b/internal/collector/physical_disk/physical_disk.go
index f1f8dd489..e8e23fbf1 100644
--- a/internal/collector/physical_disk/physical_disk.go
+++ b/internal/collector/physical_disk/physical_disk.go
@@ -23,7 +23,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -45,7 +45,8 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
idleTime *prometheus.Desc
readBytesTotal *prometheus.Desc
@@ -126,24 +127,9 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
- counters := []string{
- CurrentDiskQueueLength,
- DiskReadBytesPerSec,
- DiskReadsPerSec,
- DiskWriteBytesPerSec,
- DiskWritesPerSec,
- PercentDiskReadTime,
- PercentDiskWriteTime,
- PercentIdleTime,
- SplitIOPerSec,
- AvgDiskSecPerRead,
- AvgDiskSecPerWrite,
- AvgDiskSecPerTransfer,
- }
-
var err error
- c.perfDataCollector, err = perfdata.NewCollector("PhysicalDisk", perfdata.InstancesAll, counters)
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("PhysicalDisk", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
}
@@ -238,102 +224,102 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect PhysicalDisk metrics: %w", err)
}
- for name, disk := range perfData {
- if c.config.DiskExclude.MatchString(name) ||
- !c.config.DiskInclude.MatchString(name) {
+ for _, data := range c.perfDataObject {
+ if c.config.DiskExclude.MatchString(data.Name) ||
+ !c.config.DiskInclude.MatchString(data.Name) {
continue
}
// Parse physical disk number from disk.Name. Mountpoint information is
// sometimes included, e.g. "1 C:".
- disk_number, _, _ := strings.Cut(name, " ")
+ disk_number, _, _ := strings.Cut(data.Name, " ")
ch <- prometheus.MustNewConstMetric(
c.requestsQueued,
prometheus.GaugeValue,
- disk[CurrentDiskQueueLength].FirstValue,
+ data.CurrentDiskQueueLength,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.readBytesTotal,
prometheus.CounterValue,
- disk[DiskReadBytesPerSec].FirstValue,
+ data.DiskReadBytesPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.readsTotal,
prometheus.CounterValue,
- disk[DiskReadsPerSec].FirstValue,
+ data.DiskReadsPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.writeBytesTotal,
prometheus.CounterValue,
- disk[DiskWriteBytesPerSec].FirstValue,
+ data.DiskWriteBytesPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.writesTotal,
prometheus.CounterValue,
- disk[DiskWritesPerSec].FirstValue,
+ data.DiskWritesPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.readTime,
prometheus.CounterValue,
- disk[PercentDiskReadTime].FirstValue,
+ data.PercentDiskReadTime,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.writeTime,
prometheus.CounterValue,
- disk[PercentDiskWriteTime].FirstValue,
+ data.PercentDiskWriteTime,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.idleTime,
prometheus.CounterValue,
- disk[PercentIdleTime].FirstValue,
+ data.PercentIdleTime,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.splitIOs,
prometheus.CounterValue,
- disk[SplitIOPerSec].FirstValue,
+ data.SplitIOPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.readLatency,
prometheus.CounterValue,
- disk[AvgDiskSecPerRead].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgDiskSecPerRead*pdh.TicksToSecondScaleFactor,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.writeLatency,
prometheus.CounterValue,
- disk[AvgDiskSecPerWrite].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgDiskSecPerWrite*pdh.TicksToSecondScaleFactor,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.readWriteLatency,
prometheus.CounterValue,
- disk[AvgDiskSecPerTransfer].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgDiskSecPerTransfer*pdh.TicksToSecondScaleFactor,
disk_number,
)
}
diff --git a/internal/collector/physical_disk/types.go b/internal/collector/physical_disk/types.go
new file mode 100644
index 000000000..42fd87a05
--- /dev/null
+++ b/internal/collector/physical_disk/types.go
@@ -0,0 +1,33 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package physical_disk
+
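+// perfDataCounterValues carries the "PhysicalDisk" counters for one disk instance;
+// the perfdata tags give the exact PDH counter names.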
+type perfDataCounterValues struct {
+ Name string
+
+ CurrentDiskQueueLength float64 `perfdata:"Current Disk Queue Length"`
+ DiskReadBytesPerSec float64 `perfdata:"Disk Read Bytes/sec"`
+ DiskReadsPerSec float64 `perfdata:"Disk Reads/sec"`
+ DiskWriteBytesPerSec float64 `perfdata:"Disk Write Bytes/sec"`
+ DiskWritesPerSec float64 `perfdata:"Disk Writes/sec"`
+ PercentDiskReadTime float64 `perfdata:"% Disk Read Time"`
+ PercentDiskWriteTime float64 `perfdata:"% Disk Write Time"`
+ PercentIdleTime float64 `perfdata:"% Idle Time"`
+ SplitIOPerSec float64 `perfdata:"Split IO/Sec"`
+ AvgDiskSecPerRead float64 `perfdata:"Avg. Disk sec/Read"`
+ AvgDiskSecPerWrite float64 `perfdata:"Avg. Disk sec/Write"`
+ AvgDiskSecPerTransfer float64 `perfdata:"Avg. Disk sec/Transfer"`
+}
diff --git a/internal/collector/printer/printer_test.go b/internal/collector/printer/printer_test.go
index 084c950b5..d96f275db 100644
--- a/internal/collector/printer/printer_test.go
+++ b/internal/collector/printer/printer_test.go
@@ -26,8 +26,10 @@ import (
func BenchmarkCollector(b *testing.B) {
// Whitelist is not set in testing context (kingpin flags not parsed), causing the collector to skip all printers.
printersInclude := ".+"
- kingpin.CommandLine.GetArg("collector.printer.include").StringVar(&printersInclude)
- testutils.FuncBenchmarkCollector(b, "printer", printer.NewWithFlags)
+
+ testutils.FuncBenchmarkCollector(b, "printer", printer.NewWithFlags, func(app *kingpin.Application) {
+ app.GetFlag("collector.printer.include").StringVar(&printersInclude)
+ })
}
func TestCollector(t *testing.T) {
diff --git a/internal/collector/process/const.go b/internal/collector/process/const.go
deleted file mode 100644
index 8d06d4353..000000000
--- a/internal/collector/process/const.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package process
-
-const (
- percentProcessorTime = "% Processor Time"
- percentPrivilegedTime = "% Privileged Time"
- percentUserTime = "% User Time"
- creatingProcessID = "Creating Process ID"
- elapsedTime = "Elapsed Time"
- handleCount = "Handle Count"
- ioDataBytesPerSec = "IO Data Bytes/sec"
- ioDataOperationsPerSec = "IO Data Operations/sec"
- ioOtherBytesPerSec = "IO Other Bytes/sec"
- ioOtherOperationsPerSec = "IO Other Operations/sec"
- ioReadBytesPerSec = "IO Read Bytes/sec"
- ioReadOperationsPerSec = "IO Read Operations/sec"
- ioWriteBytesPerSec = "IO Write Bytes/sec"
- ioWriteOperationsPerSec = "IO Write Operations/sec"
- pageFaultsPerSec = "Page Faults/sec"
- pageFileBytesPeak = "Page File Bytes Peak"
- pageFileBytes = "Page File Bytes"
- poolNonPagedBytes = "Pool Nonpaged Bytes"
- poolPagedBytes = "Pool Paged Bytes"
- priorityBase = "Priority Base"
- privateBytes = "Private Bytes"
- threadCount = "Thread Count"
- virtualBytesPeak = "Virtual Bytes Peak"
- virtualBytes = "Virtual Bytes"
- workingSetPrivate = "Working Set - Private"
- workingSetPeak = "Working Set Peak"
- workingSet = "Working Set"
-
- // Process V1.
- idProcess = "ID Process"
-
- // Process V2.
- processID = "Process ID"
-)
diff --git a/internal/collector/process/process.go b/internal/collector/process/process.go
index 24aec9048..9bf087536 100644
--- a/internal/collector/process/process.go
+++ b/internal/collector/process/process.go
@@ -20,7 +20,6 @@ import (
"fmt"
"log/slog"
"regexp"
- "runtime/debug"
"strconv"
"strings"
"sync"
@@ -28,7 +27,8 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
+ "github.com/prometheus-community/windows_exporter/internal/pdh/registry"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
@@ -57,12 +57,14 @@ type Collector struct {
miSession *mi.Session
workerProcessMIQueryQuery mi.Query
- perfDataCollector *perfdata.Collector
+ collectorVersion int
+
+ collectorV1
+ collectorV2
lookupCache sync.Map
- workerCh chan processWorkerRequest
- mu sync.RWMutex
+ mu sync.RWMutex
info *prometheus.Desc
cpuTimeTotal *prometheus.Desc
@@ -82,14 +84,6 @@ type Collector struct {
workingSetPrivate *prometheus.Desc
}
-type processWorkerRequest struct {
- ch chan<- prometheus.Metric
- name string
- performanceCounterValues map[string]perfdata.CounterValue
- waitGroup *sync.WaitGroup
- workerProcesses []WorkerProcess
-}
-
func New(config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
@@ -159,9 +153,8 @@ func (c *Collector) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
- c.perfDataCollector.Close()
-
- close(c.workerCh)
+ c.closeV1()
+ c.closeV2()
return nil
}
@@ -181,56 +174,35 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
c.workerProcessMIQueryQuery = miQuery
c.miSession = miSession
- counters := []string{
- processID,
- percentProcessorTime,
- percentPrivilegedTime,
- percentUserTime,
- creatingProcessID,
- elapsedTime,
- handleCount,
- ioDataBytesPerSec,
- ioDataOperationsPerSec,
- ioOtherBytesPerSec,
- ioOtherOperationsPerSec,
- ioReadBytesPerSec,
- ioReadOperationsPerSec,
- ioWriteBytesPerSec,
- ioWriteOperationsPerSec,
- pageFaultsPerSec,
- pageFileBytesPeak,
- pageFileBytes,
- poolNonPagedBytes,
- poolPagedBytes,
- priorityBase,
- privateBytes,
- threadCount,
- virtualBytesPeak,
- virtualBytes,
- workingSetPrivate,
- workingSetPeak,
- workingSet,
- }
-
- c.perfDataCollector, err = perfdata.NewCollector("Process V2", perfdata.InstancesAll, counters)
- if errors.Is(err, perfdata.NewPdhError(perfdata.PdhCstatusNoObject)) {
- counters[0] = idProcess
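+ // Prefer the "Process V2" counter set and fall back to the legacy "Process" object
+ // (via the registry-based collector) when V2 is not available.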
+ c.collectorVersion = 2
+ c.perfDataCollectorV2, err = pdh.NewCollector[perfDataCounterValuesV2]("Process V2", pdh.InstancesAll)
- c.perfDataCollector, err = perfdata.NewCollector("Process", perfdata.InstancesAll, counters)
+ if errors.Is(err, pdh.NewPdhError(pdh.CstatusNoObject)) {
+ c.collectorVersion = 1
+ c.perfDataCollectorV1, err = registry.NewCollector[perfDataCounterValuesV1]("Process", pdh.InstancesAll)
}
if err != nil {
return fmt.Errorf("failed to create Process collector: %w", err)
}
- c.workerCh = make(chan processWorkerRequest, 32)
- c.mu = sync.RWMutex{}
- c.lookupCache = sync.Map{}
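+ // Start a small worker pool for the selected collector version; Collect fans the
+ // per-process rows out over the matching channel.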
+ if c.collectorVersion == 1 {
+ c.workerChV1 = make(chan processWorkerRequestV1, 32)
+
+ for range 4 {
+ go c.collectWorkerV1()
+ }
+ } else {
+ c.workerChV2 = make(chan processWorkerRequestV2, 32)
- for range 4 {
- go c.collectWorker()
+ for range 4 {
+ go c.collectWorkerV2()
+ }
}
+ c.mu = sync.RWMutex{}
+ c.lookupCache = sync.Map{}
+
if c.config.ProcessInclude.String() == "^(?:.*)$" && c.config.ProcessExclude.String() == "^(?:)$" {
logger.Warn("No filters specified for process collector. This will generate a very large number of metrics!")
}
@@ -336,21 +308,7 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
return nil
}
-type WorkerProcess struct {
- AppPoolName string `mi:"AppPoolName"`
- ProcessId uint64 `mi:"ProcessId"`
-}
-
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
- if err != nil {
- return fmt.Errorf("failed to collect metrics: %w", err)
- }
-
- if len(perfData) == 0 {
- return errors.New("perflib query returned empty result set")
- }
-
var workerProcesses []WorkerProcess
if c.config.EnableWorkerProcess {
if err := c.miSession.Query(&workerProcesses, mi.NamespaceRootWebAdministration, c.workerProcessMIQueryQuery); err != nil {
@@ -358,239 +316,11 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
}
- wg := &sync.WaitGroup{}
-
- for name, process := range perfData {
- // Duplicate processes are suffixed #, and an index number. Remove those.
- name, _, _ = strings.Cut(name, ":") // Process V2
- name, _, _ = strings.Cut(name, "#") // Process
-
- if c.config.ProcessExclude.MatchString(name) || !c.config.ProcessInclude.MatchString(name) {
- continue
- }
-
- wg.Add(1)
-
- c.workerCh <- processWorkerRequest{
- ch: ch,
- name: name,
- performanceCounterValues: process,
- workerProcesses: workerProcesses,
- waitGroup: wg,
- }
+ if c.collectorVersion == 1 {
+ return c.collectV1(ch, workerProcesses)
}
- wg.Wait()
-
- return nil
-}
-
-func (c *Collector) collectWorker() {
- defer func() {
- if r := recover(); r != nil {
- c.logger.Error("Worker panic",
- slog.Any("panic", r),
- slog.String("stack", string(debug.Stack())),
- )
-
- // Restart the collectWorker
- go c.collectWorker()
- }
- }()
-
- for req := range c.workerCh {
- (func() {
- defer req.waitGroup.Done()
-
- ch := req.ch
- name := req.name
- process := req.performanceCounterValues
-
- var pid uint64
-
- if v, ok := process[processID]; ok {
- pid = uint64(v.FirstValue)
- } else if v, ok = process[idProcess]; ok {
- pid = uint64(v.FirstValue)
- }
-
- parentPID := strconv.FormatUint(uint64(process[creatingProcessID].FirstValue), 10)
-
- if c.config.EnableWorkerProcess {
- for _, wp := range req.workerProcesses {
- if wp.ProcessId == pid {
- name = strings.Join([]string{name, wp.AppPoolName}, "_")
-
- break
- }
- }
- }
-
- cmdLine, processOwner, processGroupID, err := c.getProcessInformation(uint32(pid))
- if err != nil {
- c.logger.Debug("Failed to get process information",
- slog.Uint64("pid", pid),
- slog.Any("err", err),
- )
- }
-
- pidString := strconv.FormatUint(pid, 10)
-
- ch <- prometheus.MustNewConstMetric(
- c.info,
- prometheus.GaugeValue,
- 1.0,
- name, pidString, parentPID, strconv.Itoa(int(processGroupID)), processOwner, cmdLine,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.startTime,
- prometheus.GaugeValue,
- process[elapsedTime].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.handleCount,
- prometheus.GaugeValue,
- process[handleCount].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.cpuTimeTotal,
- prometheus.CounterValue,
- process[percentPrivilegedTime].FirstValue,
- name, pidString, "privileged",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.cpuTimeTotal,
- prometheus.CounterValue,
- process[percentUserTime].FirstValue,
- name, pidString, "user",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.ioBytesTotal,
- prometheus.CounterValue,
- process[ioOtherBytesPerSec].FirstValue,
- name, pidString, "other",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.ioOperationsTotal,
- prometheus.CounterValue,
- process[ioOtherOperationsPerSec].FirstValue,
- name, pidString, "other",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.ioBytesTotal,
- prometheus.CounterValue,
- process[ioReadBytesPerSec].FirstValue,
- name, pidString, "read",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.ioOperationsTotal,
- prometheus.CounterValue,
- process[ioReadOperationsPerSec].FirstValue,
- name, pidString, "read",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.ioBytesTotal,
- prometheus.CounterValue,
- process[ioWriteBytesPerSec].FirstValue,
- name, pidString, "write",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.ioOperationsTotal,
- prometheus.CounterValue,
- process[ioWriteOperationsPerSec].FirstValue,
- name, pidString, "write",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.pageFaultsTotal,
- prometheus.CounterValue,
- process[pageFaultsPerSec].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.pageFileBytes,
- prometheus.GaugeValue,
- process[pageFileBytes].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.poolBytes,
- prometheus.GaugeValue,
- process[poolNonPagedBytes].FirstValue,
- name, pidString, "nonpaged",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.poolBytes,
- prometheus.GaugeValue,
- process[poolPagedBytes].FirstValue,
- name, pidString, "paged",
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.priorityBase,
- prometheus.GaugeValue,
- process[priorityBase].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.privateBytes,
- prometheus.GaugeValue,
- process[privateBytes].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.threadCount,
- prometheus.GaugeValue,
- process[threadCount].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.virtualBytes,
- prometheus.GaugeValue,
- process[virtualBytes].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.workingSetPrivate,
- prometheus.GaugeValue,
- process[workingSetPrivate].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.workingSetPeak,
- prometheus.GaugeValue,
- process[workingSetPeak].FirstValue,
- name, pidString,
- )
-
- ch <- prometheus.MustNewConstMetric(
- c.workingSet,
- prometheus.GaugeValue,
- process[workingSet].FirstValue,
- name, pidString,
- )
- })()
- }
+ return c.collectV2(ch, workerProcesses)
}
// ref: https://github.com/microsoft/hcsshim/blob/8beabacfc2d21767a07c20f8dd5f9f3932dbf305/internal/uvm/stats.go#L25
diff --git a/internal/collector/process/process_test.go b/internal/collector/process/process_test.go
index 8de22ba55..6d71cf26e 100644
--- a/internal/collector/process/process_test.go
+++ b/internal/collector/process/process_test.go
@@ -26,9 +26,10 @@ import (
func BenchmarkProcessCollector(b *testing.B) {
// PrinterInclude is not set in testing context (kingpin flags not parsed), causing the collector to skip all processes.
localProcessInclude := ".+"
- kingpin.CommandLine.GetArg("collector.process.include").StringVar(&localProcessInclude)
// No context name required as collector source is WMI
- testutils.FuncBenchmarkCollector(b, process.Name, process.NewWithFlags)
+ testutils.FuncBenchmarkCollector(b, process.Name, process.NewWithFlags, func(app *kingpin.Application) {
+ app.GetFlag("collector.process.include").StringVar(&localProcessInclude)
+ })
}
func TestCollector(t *testing.T) {
diff --git a/internal/collector/process/process_v1.go b/internal/collector/process/process_v1.go
new file mode 100644
index 000000000..a50308aff
--- /dev/null
+++ b/internal/collector/process/process_v1.go
@@ -0,0 +1,285 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package process
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "runtime/debug"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/prometheus-community/windows_exporter/internal/pdh/registry"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type collectorV1 struct {
+ perfDataCollectorV1 *registry.Collector
+ perfDataObjectV1 []perfDataCounterValuesV1
+ workerChV1 chan processWorkerRequestV1
+}
+
+type processWorkerRequestV1 struct {
+ ch chan<- prometheus.Metric
+ name string
+ performanceCounterValues perfDataCounterValuesV1
+ waitGroup *sync.WaitGroup
+ workerProcesses []WorkerProcess
+}
+
+func (c *Collector) closeV1() {
+ c.perfDataCollectorV1.Close()
+
+ if c.workerChV1 != nil {
+ close(c.workerChV1)
+ c.workerChV1 = nil
+ }
+}
+
+func (c *Collector) collectV1(ch chan<- prometheus.Metric, workerProcesses []WorkerProcess) error {
+ err := c.perfDataCollectorV1.Collect(&c.perfDataObjectV1)
+ if err != nil {
+ return fmt.Errorf("failed to collect metrics: %w", err)
+ }
+
+ wg := &sync.WaitGroup{}
+
+ for _, process := range c.perfDataObjectV1 {
+ // Duplicate process instances are suffixed with "#" and an index number. Remove that suffix.
+ name, _, _ := strings.Cut(process.Name, ":") // Process V1
+
+ if c.config.ProcessExclude.MatchString(name) || !c.config.ProcessInclude.MatchString(name) {
+ continue
+ }
+
+ wg.Add(1)
+
+ c.workerChV1 <- processWorkerRequestV1{
+ ch: ch,
+ name: name,
+ performanceCounterValues: process,
+ workerProcesses: workerProcesses,
+ waitGroup: wg,
+ }
+ }
+
+ wg.Wait()
+
+ return nil
+}
+
+func (c *Collector) collectWorkerV1() {
+ defer func() {
+ if r := recover(); r != nil {
+ c.logger.Error("Worker panic",
+ slog.Any("panic", r),
+ slog.String("stack", string(debug.Stack())),
+ )
+
+ // Restart the collectWorker
+ go c.collectWorkerV1()
+ }
+ }()
+
+ for req := range c.workerChV1 {
+ (func() {
+ defer req.waitGroup.Done()
+
+ ch := req.ch
+ name := req.name
+ data := req.performanceCounterValues
+
+ pid := uint64(data.IdProcess)
+ parentPID := strconv.FormatUint(uint64(data.CreatingProcessID), 10)
+
+ if c.config.EnableWorkerProcess {
+ for _, wp := range req.workerProcesses {
+ if wp.ProcessId == pid {
+ name = strings.Join([]string{name, wp.AppPoolName}, "_")
+
+ break
+ }
+ }
+ }
+
+ cmdLine, processOwner, processGroupID, err := c.getProcessInformation(uint32(pid))
+ if err != nil {
+ c.logger.LogAttrs(context.Background(), slog.LevelDebug, "Failed to get process information",
+ slog.Uint64("pid", pid),
+ slog.Any("err", err),
+ )
+ }
+
+ pidString := strconv.FormatUint(pid, 10)
+
+ ch <- prometheus.MustNewConstMetric(
+ c.info,
+ prometheus.GaugeValue,
+ 1.0,
+ name, pidString, parentPID, strconv.Itoa(int(processGroupID)), processOwner, cmdLine,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.startTime,
+ prometheus.GaugeValue,
+ data.ElapsedTime,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.handleCount,
+ prometheus.GaugeValue,
+ data.HandleCount,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.cpuTimeTotal,
+ prometheus.CounterValue,
+ data.PercentPrivilegedTime,
+ name, pidString, "privileged",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.cpuTimeTotal,
+ prometheus.CounterValue,
+ data.PercentUserTime,
+ name, pidString, "user",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioBytesTotal,
+ prometheus.CounterValue,
+ data.IoOtherBytesPerSec,
+ name, pidString, "other",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioOperationsTotal,
+ prometheus.CounterValue,
+ data.IoOtherOperationsPerSec,
+ name, pidString, "other",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioBytesTotal,
+ prometheus.CounterValue,
+ data.IoReadBytesPerSec,
+ name, pidString, "read",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioOperationsTotal,
+ prometheus.CounterValue,
+ data.IoReadOperationsPerSec,
+ name, pidString, "read",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioBytesTotal,
+ prometheus.CounterValue,
+ data.IoWriteBytesPerSec,
+ name, pidString, "write",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioOperationsTotal,
+ prometheus.CounterValue,
+ data.IoWriteOperationsPerSec,
+ name, pidString, "write",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.pageFaultsTotal,
+ prometheus.CounterValue,
+ data.PageFaultsPerSec,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.pageFileBytes,
+ prometheus.GaugeValue,
+ data.PageFileBytes,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.poolBytes,
+ prometheus.GaugeValue,
+ data.PoolNonPagedBytes,
+ name, pidString, "nonpaged",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.poolBytes,
+ prometheus.GaugeValue,
+ data.PoolPagedBytes,
+ name, pidString, "paged",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.priorityBase,
+ prometheus.GaugeValue,
+ data.PriorityBase,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.privateBytes,
+ prometheus.GaugeValue,
+ data.PrivateBytes,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.threadCount,
+ prometheus.GaugeValue,
+ data.ThreadCount,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.virtualBytes,
+ prometheus.GaugeValue,
+ data.VirtualBytes,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.workingSetPrivate,
+ prometheus.GaugeValue,
+ data.WorkingSetPrivate,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.workingSetPeak,
+ prometheus.GaugeValue,
+ data.WorkingSetPeak,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.workingSet,
+ prometheus.GaugeValue,
+ data.WorkingSet,
+ name, pidString,
+ )
+ })()
+ }
+}
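
Aside: collectV1 above (and collectV2 below) both follow the same worker pattern, namely a long-lived goroutine draining a request channel, a per-request WaitGroup release, and a recover() handler that respawns the worker after a panic. A stand-alone sketch of that pattern follows; the request type and payload are illustrative, not taken from the exporter.

```go
package main

import (
	"fmt"
	"sync"
)

// request mirrors the shape of processWorkerRequestV1/V2: a payload plus the
// WaitGroup the producer is blocked on.
type request struct {
	payload int
	wg      *sync.WaitGroup
}

// worker drains the channel until it is closed. A recovered panic restarts
// the worker, as collectWorkerV1/V2 do, so one bad request cannot kill the pool.
func worker(ch chan request) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("worker panic:", r)

			go worker(ch)
		}
	}()

	for req := range ch {
		func() {
			// The inner defer guarantees Done() runs even if handling panics,
			// so the producer's Wait() never hangs.
			defer req.wg.Done()

			fmt.Println("handled", req.payload)
		}()
	}
}

func main() {
	ch := make(chan request)
	go worker(ch)

	wg := &sync.WaitGroup{}

	for i := 0; i < 3; i++ {
		wg.Add(1)
		ch <- request{payload: i, wg: wg}
	}

	wg.Wait()
	close(ch)
}
```

The inner anonymous function is what keeps the WaitGroup consistent: its deferred Done() runs during panic unwinding, before the worker's own recover fires and the goroutine is restarted.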
diff --git a/internal/collector/process/process_v2.go b/internal/collector/process/process_v2.go
new file mode 100644
index 000000000..2e0d1a887
--- /dev/null
+++ b/internal/collector/process/process_v2.go
@@ -0,0 +1,285 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package process
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "runtime/debug"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type collectorV2 struct {
+ perfDataCollectorV2 *pdh.Collector
+ perfDataObjectV2 []perfDataCounterValuesV2
+ workerChV2 chan processWorkerRequestV2
+}
+
+type processWorkerRequestV2 struct {
+ ch chan<- prometheus.Metric
+ name string
+ performanceCounterValues perfDataCounterValuesV2
+ waitGroup *sync.WaitGroup
+ workerProcesses []WorkerProcess
+}
+
+func (c *Collector) closeV2() {
+ c.perfDataCollectorV2.Close()
+
+ if c.workerChV2 != nil {
+ close(c.workerChV2)
+ c.workerChV2 = nil
+ }
+}
+
+func (c *Collector) collectV2(ch chan<- prometheus.Metric, workerProcesses []WorkerProcess) error {
+ err := c.perfDataCollectorV2.Collect(&c.perfDataObjectV2)
+ if err != nil {
+ return fmt.Errorf("failed to collect metrics: %w", err)
+ }
+
+ wg := &sync.WaitGroup{}
+
+ for _, process := range c.perfDataObjectV2 {
+ // Duplicate process instances are suffixed with "#" and an index number. Remove that suffix.
+ name, _, _ := strings.Cut(process.Name, ":") // Process V2
+
+ if c.config.ProcessExclude.MatchString(name) || !c.config.ProcessInclude.MatchString(name) {
+ continue
+ }
+
+ wg.Add(1)
+
+ c.workerChV2 <- processWorkerRequestV2{
+ ch: ch,
+ name: name,
+ performanceCounterValues: process,
+ workerProcesses: workerProcesses,
+ waitGroup: wg,
+ }
+ }
+
+ wg.Wait()
+
+ return nil
+}
+
+func (c *Collector) collectWorkerV2() {
+ defer func() {
+ if r := recover(); r != nil {
+ c.logger.Error("Worker panic",
+ slog.Any("panic", r),
+ slog.String("stack", string(debug.Stack())),
+ )
+
+ // Restart the collectWorker
+ go c.collectWorkerV2()
+ }
+ }()
+
+ for req := range c.workerChV2 {
+ (func() {
+ defer req.waitGroup.Done()
+
+ ch := req.ch
+ name := req.name
+ data := req.performanceCounterValues
+
+ pid := uint64(data.ProcessID)
+ parentPID := strconv.FormatUint(uint64(data.CreatingProcessID), 10)
+
+ if c.config.EnableWorkerProcess {
+ for _, wp := range req.workerProcesses {
+ if wp.ProcessId == pid {
+ name = strings.Join([]string{name, wp.AppPoolName}, "_")
+
+ break
+ }
+ }
+ }
+
+ cmdLine, processOwner, processGroupID, err := c.getProcessInformation(uint32(pid))
+ if err != nil {
+ c.logger.LogAttrs(context.Background(), slog.LevelDebug, "Failed to get process information",
+ slog.Uint64("pid", pid),
+ slog.Any("err", err),
+ )
+ }
+
+ pidString := strconv.FormatUint(pid, 10)
+
+ ch <- prometheus.MustNewConstMetric(
+ c.info,
+ prometheus.GaugeValue,
+ 1.0,
+ name, pidString, parentPID, strconv.Itoa(int(processGroupID)), processOwner, cmdLine,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.startTime,
+ prometheus.GaugeValue,
+ data.ElapsedTime,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.handleCount,
+ prometheus.GaugeValue,
+ data.HandleCount,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.cpuTimeTotal,
+ prometheus.CounterValue,
+ data.PercentPrivilegedTime,
+ name, pidString, "privileged",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.cpuTimeTotal,
+ prometheus.CounterValue,
+ data.PercentUserTime,
+ name, pidString, "user",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioBytesTotal,
+ prometheus.CounterValue,
+ data.IoOtherBytesPerSec,
+ name, pidString, "other",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioOperationsTotal,
+ prometheus.CounterValue,
+ data.IoOtherOperationsPerSec,
+ name, pidString, "other",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioBytesTotal,
+ prometheus.CounterValue,
+ data.IoReadBytesPerSec,
+ name, pidString, "read",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioOperationsTotal,
+ prometheus.CounterValue,
+ data.IoReadOperationsPerSec,
+ name, pidString, "read",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioBytesTotal,
+ prometheus.CounterValue,
+ data.IoWriteBytesPerSec,
+ name, pidString, "write",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.ioOperationsTotal,
+ prometheus.CounterValue,
+ data.IoWriteOperationsPerSec,
+ name, pidString, "write",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.pageFaultsTotal,
+ prometheus.CounterValue,
+ data.PageFaultsPerSec,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.pageFileBytes,
+ prometheus.GaugeValue,
+ data.PageFileBytes,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.poolBytes,
+ prometheus.GaugeValue,
+ data.PoolNonPagedBytes,
+ name, pidString, "nonpaged",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.poolBytes,
+ prometheus.GaugeValue,
+ data.PoolPagedBytes,
+ name, pidString, "paged",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.priorityBase,
+ prometheus.GaugeValue,
+ data.PriorityBase,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.privateBytes,
+ prometheus.GaugeValue,
+ data.PrivateBytes,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.threadCount,
+ prometheus.GaugeValue,
+ data.ThreadCount,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.virtualBytes,
+ prometheus.GaugeValue,
+ data.VirtualBytes,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.workingSetPrivate,
+ prometheus.GaugeValue,
+ data.WorkingSetPrivate,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.workingSetPeak,
+ prometheus.GaugeValue,
+ data.WorkingSetPeak,
+ name, pidString,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ c.workingSet,
+ prometheus.GaugeValue,
+ data.WorkingSet,
+ name, pidString,
+ )
+ })()
+ }
+}
diff --git a/internal/collector/process/types.go b/internal/collector/process/types.go
new file mode 100644
index 000000000..1d3889171
--- /dev/null
+++ b/internal/collector/process/types.go
@@ -0,0 +1,87 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package process
+
+type WorkerProcess struct {
+ AppPoolName string `mi:"AppPoolName"`
+ ProcessId uint64 `mi:"ProcessId"`
+}
+
+type perfDataCounterValuesV1 struct {
+ Name string
+
+ PercentProcessorTime float64 `perfdata:"% Processor Time"`
+ PercentPrivilegedTime float64 `perfdata:"% Privileged Time"`
+ PercentUserTime float64 `perfdata:"% User Time"`
+ CreatingProcessID float64 `perfdata:"Creating Process ID"`
+ ElapsedTime float64 `perfdata:"Elapsed Time"`
+ HandleCount float64 `perfdata:"Handle Count"`
+ IoDataBytesPerSec float64 `perfdata:"IO Data Bytes/sec"`
+ IoDataOperationsPerSec float64 `perfdata:"IO Data Operations/sec"`
+ IoOtherBytesPerSec float64 `perfdata:"IO Other Bytes/sec"`
+ IoOtherOperationsPerSec float64 `perfdata:"IO Other Operations/sec"`
+ IoReadBytesPerSec float64 `perfdata:"IO Read Bytes/sec"`
+ IoReadOperationsPerSec float64 `perfdata:"IO Read Operations/sec"`
+ IoWriteBytesPerSec float64 `perfdata:"IO Write Bytes/sec"`
+ IoWriteOperationsPerSec float64 `perfdata:"IO Write Operations/sec"`
+ PageFaultsPerSec float64 `perfdata:"Page Faults/sec"`
+ PageFileBytesPeak float64 `perfdata:"Page File Bytes Peak"`
+ PageFileBytes float64 `perfdata:"Page File Bytes"`
+ PoolNonPagedBytes float64 `perfdata:"Pool Nonpaged Bytes"`
+ PoolPagedBytes float64 `perfdata:"Pool Paged Bytes"`
+ PriorityBase float64 `perfdata:"Priority Base"`
+ PrivateBytes float64 `perfdata:"Private Bytes"`
+ ThreadCount float64 `perfdata:"Thread Count"`
+ VirtualBytesPeak float64 `perfdata:"Virtual Bytes Peak"`
+ VirtualBytes float64 `perfdata:"Virtual Bytes"`
+ WorkingSetPrivate float64 `perfdata:"Working Set - Private"`
+ WorkingSetPeak float64 `perfdata:"Working Set Peak"`
+ WorkingSet float64 `perfdata:"Working Set"`
+ IdProcess float64 `perfdata:"ID Process"`
+}
+
+type perfDataCounterValuesV2 struct {
+ Name string
+
+ PercentProcessorTime float64 `perfdata:"% Processor Time"`
+ PercentPrivilegedTime float64 `perfdata:"% Privileged Time"`
+ PercentUserTime float64 `perfdata:"% User Time"`
+ CreatingProcessID float64 `perfdata:"Creating Process ID"`
+ ElapsedTime float64 `perfdata:"Elapsed Time"`
+ HandleCount float64 `perfdata:"Handle Count"`
+ IoDataBytesPerSec float64 `perfdata:"IO Data Bytes/sec"`
+ IoDataOperationsPerSec float64 `perfdata:"IO Data Operations/sec"`
+ IoOtherBytesPerSec float64 `perfdata:"IO Other Bytes/sec"`
+ IoOtherOperationsPerSec float64 `perfdata:"IO Other Operations/sec"`
+ IoReadBytesPerSec float64 `perfdata:"IO Read Bytes/sec"`
+ IoReadOperationsPerSec float64 `perfdata:"IO Read Operations/sec"`
+ IoWriteBytesPerSec float64 `perfdata:"IO Write Bytes/sec"`
+ IoWriteOperationsPerSec float64 `perfdata:"IO Write Operations/sec"`
+ PageFaultsPerSec float64 `perfdata:"Page Faults/sec"`
+ PageFileBytesPeak float64 `perfdata:"Page File Bytes Peak"`
+ PageFileBytes float64 `perfdata:"Page File Bytes"`
+ PoolNonPagedBytes float64 `perfdata:"Pool Nonpaged Bytes"`
+ PoolPagedBytes float64 `perfdata:"Pool Paged Bytes"`
+ PriorityBase float64 `perfdata:"Priority Base"`
+ PrivateBytes float64 `perfdata:"Private Bytes"`
+ ThreadCount float64 `perfdata:"Thread Count"`
+ VirtualBytesPeak float64 `perfdata:"Virtual Bytes Peak"`
+ VirtualBytes float64 `perfdata:"Virtual Bytes"`
+ WorkingSetPrivate float64 `perfdata:"Working Set - Private"`
+ WorkingSetPeak float64 `perfdata:"Working Set Peak"`
+ WorkingSet float64 `perfdata:"Working Set"`
+ ProcessID float64 `perfdata:"Process ID"`
+}
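
The new types.go files drive counter discovery through struct tags: each float64 field names the PDH counter it should be filled from via a `perfdata:"…"` tag. As a rough illustration of what such a tag gives a generic collector (this is a reflection sketch, not the exporter's actual pdh package):

```go
package main

import (
	"fmt"
	"reflect"
)

// perfDataCounterValues is a cut-down version of the structs in types.go:
// one float64 field per PDH counter, named by the perfdata struct tag.
type perfDataCounterValues struct {
	Name string

	HandleCount float64 `perfdata:"Handle Count"`
	ThreadCount float64 `perfdata:"Thread Count"`
}

// counterNames collects the perfdata tags of T, which is roughly the
// information a generic NewCollector[T] needs to know which counters to add.
func counterNames[T any]() []string {
	var names []string

	t := reflect.TypeOf((*T)(nil)).Elem()
	for i := 0; i < t.NumField(); i++ {
		if tag, ok := t.Field(i).Tag.Lookup("perfdata"); ok {
			names = append(names, tag)
		}
	}

	return names
}

func main() {
	fmt.Println(counterNames[perfDataCounterValues]())
	// [Handle Count Thread Count]
}
```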
diff --git a/internal/collector/remote_fx/const.go b/internal/collector/remote_fx/const.go
deleted file mode 100644
index 6c599f84b..000000000
--- a/internal/collector/remote_fx/const.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package remote_fx
-
-const (
- BaseTCPRTT = "Base TCP RTT"
- BaseUDPRTT = "Base UDP RTT"
- CurrentTCPBandwidth = "Current TCP Bandwidth"
- CurrentTCPRTT = "Current TCP RTT"
- CurrentUDPBandwidth = "Current UDP Bandwidth"
- CurrentUDPRTT = "Current UDP RTT"
- TotalReceivedBytes = "Total Received Bytes"
- TotalSentBytes = "Total Sent Bytes"
- UDPPacketsReceivedPersec = "UDP Packets Received/sec"
- UDPPacketsSentPersec = "UDP Packets Sent/sec"
- FECRate = "FEC rate"
- LossRate = "Loss rate"
- RetransmissionRate = "Retransmission rate"
-
- AverageEncodingTime = "Average Encoding Time"
- FrameQuality = "Frame Quality"
- FramesSkippedPerSecondInsufficientClientResources = "Frames Skipped/Second - Insufficient Server Resources"
- FramesSkippedPerSecondInsufficientNetworkResources = "Frames Skipped/Second - Insufficient Network Resources"
- FramesSkippedPerSecondInsufficientServerResources = "Frames Skipped/Second - Insufficient Client Resources"
- GraphicsCompressionratio = "Graphics Compression ratio"
- InputFramesPerSecond = "Input Frames/Second"
- OutputFramesPerSecond = "Output Frames/Second"
- SourceFramesPerSecond = "Source Frames/Second"
-)
diff --git a/internal/collector/remote_fx/remote_fx.go b/internal/collector/remote_fx/remote_fx.go
index 5bf783cf4..197cda10d 100644
--- a/internal/collector/remote_fx/remote_fx.go
+++ b/internal/collector/remote_fx/remote_fx.go
@@ -23,7 +23,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@@ -44,8 +44,10 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollectorNetwork *perfdata.Collector
- perfDataCollectorGraphics *perfdata.Collector
+ perfDataCollectorNetwork *pdh.Collector
+ perfDataObjectNetwork []perfDataCounterValuesNetwork
+ perfDataCollectorGraphics *pdh.Collector
+ perfDataObjectGraphics []perfDataCounterValuesGraphics
// net
baseTCPRTT *prometheus.Desc
@@ -102,36 +104,12 @@ func (c *Collector) Close() error {
func (c *Collector) Build(*slog.Logger, *mi.Session) error {
var err error
- c.perfDataCollectorNetwork, err = perfdata.NewCollector("RemoteFX Network", perfdata.InstancesAll, []string{
- BaseTCPRTT,
- BaseUDPRTT,
- CurrentTCPBandwidth,
- CurrentTCPRTT,
- CurrentUDPBandwidth,
- CurrentUDPRTT,
- TotalReceivedBytes,
- TotalSentBytes,
- UDPPacketsReceivedPersec,
- UDPPacketsSentPersec,
- FECRate,
- LossRate,
- RetransmissionRate,
- })
+ c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork]("RemoteFX Network", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create RemoteFX Network collector: %w", err)
}
- c.perfDataCollectorGraphics, err = perfdata.NewCollector("RemoteFX Graphics", perfdata.InstancesAll, []string{
- AverageEncodingTime,
- FrameQuality,
- FramesSkippedPerSecondInsufficientClientResources,
- FramesSkippedPerSecondInsufficientNetworkResources,
- FramesSkippedPerSecondInsufficientServerResources,
- GraphicsCompressionratio,
- InputFramesPerSecond,
- OutputFramesPerSecond,
- SourceFramesPerSecond,
- })
+ c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics]("RemoteFX Graphics", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err)
}
@@ -280,14 +258,14 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectRemoteFXNetworkCount(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorNetwork.Collect()
+ err := c.perfDataCollectorNetwork.Collect(&c.perfDataObjectNetwork)
if err != nil {
return fmt.Errorf("failed to collect RemoteFX Network metrics: %w", err)
}
- for name, data := range perfData {
+ for _, data := range c.perfDataObjectNetwork {
// only collect metrics for remote named sessions
- sessionName := normalizeSessionName(name)
+ sessionName := normalizeSessionName(data.Name)
if n := strings.ToLower(sessionName); n == "" || n == "services" || n == "console" {
continue
}
@@ -295,81 +273,81 @@ func (c *Collector) collectRemoteFXNetworkCount(ch chan<- prometheus.Metric) err
ch <- prometheus.MustNewConstMetric(
c.baseTCPRTT,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[BaseTCPRTT].FirstValue),
+ utils.MilliSecToSec(data.BaseTCPRTT),
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.baseUDPRTT,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[BaseUDPRTT].FirstValue),
+ utils.MilliSecToSec(data.BaseUDPRTT),
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.currentTCPBandwidth,
prometheus.GaugeValue,
- (data[CurrentTCPBandwidth].FirstValue*1000)/8,
+ (data.CurrentTCPBandwidth*1000)/8,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.currentTCPRTT,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[CurrentTCPRTT].FirstValue),
+ utils.MilliSecToSec(data.CurrentTCPRTT),
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.currentUDPBandwidth,
prometheus.GaugeValue,
- (data[CurrentUDPBandwidth].FirstValue*1000)/8,
+ (data.CurrentUDPBandwidth*1000)/8,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.currentUDPRTT,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[CurrentUDPRTT].FirstValue),
+ utils.MilliSecToSec(data.CurrentUDPRTT),
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.totalReceivedBytes,
prometheus.CounterValue,
- data[TotalReceivedBytes].FirstValue,
+ data.TotalReceivedBytes,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.totalSentBytes,
prometheus.CounterValue,
- data[TotalSentBytes].FirstValue,
+ data.TotalSentBytes,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.udpPacketsReceivedPerSec,
prometheus.CounterValue,
- data[UDPPacketsReceivedPersec].FirstValue,
+ data.UDPPacketsReceivedPersec,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.udpPacketsSentPerSec,
prometheus.CounterValue,
- data[UDPPacketsSentPersec].FirstValue,
+ data.UDPPacketsSentPersec,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.fecRate,
prometheus.GaugeValue,
- data[FECRate].FirstValue,
+ data.FECRate,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.lossRate,
prometheus.GaugeValue,
- data[LossRate].FirstValue,
+ data.LossRate,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.retransmissionRate,
prometheus.GaugeValue,
- data[RetransmissionRate].FirstValue,
+ data.RetransmissionRate,
sessionName,
)
}
@@ -378,14 +356,14 @@ func (c *Collector) collectRemoteFXNetworkCount(ch chan<- prometheus.Metric) err
}
func (c *Collector) collectRemoteFXGraphicsCounters(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorNetwork.Collect()
+ err := c.perfDataCollectorGraphics.Collect(&c.perfDataObjectGraphics)
if err != nil {
return fmt.Errorf("failed to collect RemoteFX Graphics metrics: %w", err)
}
- for name, data := range perfData {
+ for _, data := range c.perfDataObjectGraphics {
// only collect metrics for remote named sessions
- sessionName := normalizeSessionName(name)
+ sessionName := normalizeSessionName(data.Name)
if n := strings.ToLower(sessionName); n == "" || n == "services" || n == "console" {
continue
}
@@ -393,58 +371,58 @@ func (c *Collector) collectRemoteFXGraphicsCounters(ch chan<- prometheus.Metric)
ch <- prometheus.MustNewConstMetric(
c.averageEncodingTime,
prometheus.GaugeValue,
- utils.MilliSecToSec(data[AverageEncodingTime].FirstValue),
+ utils.MilliSecToSec(data.AverageEncodingTime),
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.frameQuality,
prometheus.GaugeValue,
- data[FrameQuality].FirstValue,
+ data.FrameQuality,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
- data[FramesSkippedPerSecondInsufficientClientResources].FirstValue,
+ data.FramesSkippedPerSecondInsufficientClientResources,
sessionName,
"client",
)
ch <- prometheus.MustNewConstMetric(
c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
- data[FramesSkippedPerSecondInsufficientNetworkResources].FirstValue,
+ data.FramesSkippedPerSecondInsufficientNetworkResources,
sessionName,
"network",
)
ch <- prometheus.MustNewConstMetric(
c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
- data[FramesSkippedPerSecondInsufficientServerResources].FirstValue,
+ data.FramesSkippedPerSecondInsufficientServerResources,
sessionName,
"server",
)
ch <- prometheus.MustNewConstMetric(
c.graphicsCompressionRatio,
prometheus.GaugeValue,
- data[GraphicsCompressionratio].FirstValue,
+ data.GraphicsCompressionratio,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.inputFramesPerSecond,
prometheus.CounterValue,
- data[InputFramesPerSecond].FirstValue,
+ data.InputFramesPerSecond,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.outputFramesPerSecond,
prometheus.CounterValue,
- data[OutputFramesPerSecond].FirstValue,
+ data.OutputFramesPerSecond,
sessionName,
)
ch <- prometheus.MustNewConstMetric(
c.sourceFramesPerSecond,
prometheus.CounterValue,
- data[SourceFramesPerSecond].FirstValue,
+ data.SourceFramesPerSecond,
sessionName,
)
}
diff --git a/internal/collector/remote_fx/types.go b/internal/collector/remote_fx/types.go
new file mode 100644
index 000000000..c60980d30
--- /dev/null
+++ b/internal/collector/remote_fx/types.go
@@ -0,0 +1,48 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package remote_fx
+
+type perfDataCounterValuesNetwork struct {
+ Name string
+
+ BaseTCPRTT float64 `perfdata:"Base TCP RTT"`
+ BaseUDPRTT float64 `perfdata:"Base UDP RTT"`
+ CurrentTCPBandwidth float64 `perfdata:"Current TCP Bandwidth"`
+ CurrentTCPRTT float64 `perfdata:"Current TCP RTT"`
+ CurrentUDPBandwidth float64 `perfdata:"Current UDP Bandwidth"`
+ CurrentUDPRTT float64 `perfdata:"Current UDP RTT"`
+ TotalReceivedBytes float64 `perfdata:"Total Received Bytes"`
+ TotalSentBytes float64 `perfdata:"Total Sent Bytes"`
+ UDPPacketsReceivedPersec float64 `perfdata:"UDP Packets Received/sec"`
+ UDPPacketsSentPersec float64 `perfdata:"UDP Packets Sent/sec"`
+ FECRate float64 `perfdata:"FEC rate"`
+ LossRate float64 `perfdata:"Loss rate"`
+ RetransmissionRate float64 `perfdata:"Retransmission rate"`
+}
+
+type perfDataCounterValuesGraphics struct {
+ Name string
+
+ AverageEncodingTime float64 `perfdata:"Average Encoding Time"`
+ FrameQuality float64 `perfdata:"Frame Quality"`
+ FramesSkippedPerSecondInsufficientClientResources float64 `perfdata:"Frames Skipped/Second - Insufficient Server Resources"`
+ FramesSkippedPerSecondInsufficientNetworkResources float64 `perfdata:"Frames Skipped/Second - Insufficient Network Resources"`
+ FramesSkippedPerSecondInsufficientServerResources float64 `perfdata:"Frames Skipped/Second - Insufficient Client Resources"`
+ GraphicsCompressionratio float64 `perfdata:"Graphics Compression ratio"`
+ InputFramesPerSecond float64 `perfdata:"Input Frames/Second"`
+ OutputFramesPerSecond float64 `perfdata:"Output Frames/Second"`
+ SourceFramesPerSecond float64 `perfdata:"Source Frames/Second"`
+}
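
As in the earlier RemoteFX code, the RTT counters are converted from milliseconds to seconds, and the bandwidth counters, which the *1000/8 arithmetic implies are reported in kbit/s, are converted to bytes per second. The two conversions in isolation (milliSecToSec stands in for the exporter's utils.MilliSecToSec helper):

```go
package main

import "fmt"

// milliSecToSec stands in for utils.MilliSecToSec: PDH reports the RTT
// counters in milliseconds, Prometheus conventions want seconds.
func milliSecToSec(ms float64) float64 {
	return ms / 1000
}

// kbitPerSecToBytesPerSec mirrors the (value*1000)/8 expression applied to
// the RemoteFX bandwidth counters before they are exported.
func kbitPerSecToBytesPerSec(kbit float64) float64 {
	return kbit * 1000 / 8
}

func main() {
	fmt.Println(milliSecToSec(250))            // 0.25 seconds
	fmt.Println(kbitPerSecToBytesPerSec(8000)) // 1e+06 bytes/second
}
```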
diff --git a/internal/collector/scheduled_task/scheduled_task.go b/internal/collector/scheduled_task/scheduled_task.go
index e024701ba..78790f5b9 100644
--- a/internal/collector/scheduled_task/scheduled_task.go
+++ b/internal/collector/scheduled_task/scheduled_task.go
@@ -20,23 +20,18 @@ import (
"fmt"
"log/slog"
"regexp"
+ "runtime"
"strings"
- "sync"
"github.com/alecthomas/kingpin/v2"
"github.com/go-ole/go-ole"
"github.com/go-ole/go-ole/oleutil"
- "github.com/prometheus-community/windows_exporter/internal/headers/schedule_service"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
-const (
- Name = "scheduled_task"
-
- workerCount = 4
-)
+const Name = "scheduled_task"
type Config struct {
TaskExclude *regexp.Regexp `yaml:"task_exclude"`
@@ -52,12 +47,6 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
- logger *slog.Logger
-
- scheduledTasksReqCh chan struct{}
- scheduledTasksWorker chan scheduledTaskWorkerRequest
- scheduledTasksCh chan scheduledTaskResults
-
lastResult *prometheus.Desc
missedRuns *prometheus.Desc
state *prometheus.Desc
@@ -82,10 +71,7 @@ const (
SCHED_S_TASK_HAS_NOT_RUN TaskResult = 0x00041303
)
-//nolint:gochecknoglobals
-var taskStates = []string{"disabled", "queued", "ready", "running", "unknown"}
-
-type scheduledTask struct {
+type ScheduledTask struct {
Name string
Path string
Enabled bool
@@ -94,15 +80,7 @@ type scheduledTask struct {
LastTaskResult TaskResult
}
-type scheduledTaskResults struct {
- tasks []scheduledTask
- err error
-}
-
-type scheduledTaskWorkerRequest struct {
- folderPath string
- results chan<- scheduledTaskResults
-}
+type ScheduledTasks []ScheduledTask
func New(config *Config) *Collector {
if config == nil {
@@ -165,27 +143,10 @@ func (c *Collector) GetName() string {
}
func (c *Collector) Close() error {
- close(c.scheduledTasksReqCh)
-
- c.scheduledTasksReqCh = nil
-
return nil
}
-func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
- c.logger = logger.With(slog.String("collector", Name))
-
- initErrCh := make(chan error)
- c.scheduledTasksReqCh = make(chan struct{})
- c.scheduledTasksCh = make(chan scheduledTaskResults)
- c.scheduledTasksWorker = make(chan scheduledTaskWorkerRequest, 100)
-
- go c.initializeScheduleService(initErrCh)
-
- if err := <-initErrCh; err != nil {
- return fmt.Errorf("initialize schedule service: %w", err)
- }
-
+func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
c.lastResult = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "last_result"),
"The result that was returned the last time the registered task was run",
@@ -211,7 +172,14 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- scheduledTasks, err := c.getScheduledTasks()
+ return c.collect(ch)
+}
+
+//nolint:gochecknoglobals
+var TASK_STATES = []string{"disabled", "queued", "ready", "running", "unknown"}
+
+func (c *Collector) collect(ch chan<- prometheus.Metric) error {
+ scheduledTasks, err := getScheduledTasks()
if err != nil {
return fmt.Errorf("get scheduled tasks: %w", err)
}
@@ -222,7 +190,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
continue
}
- for _, state := range taskStates {
+ for _, state := range TASK_STATES {
var stateValue float64
if strings.ToLower(task.State.String()) == state {
@@ -265,198 +233,71 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
return nil
}
-func (c *Collector) getScheduledTasks() ([]scheduledTask, error) {
- c.scheduledTasksReqCh <- struct{}{}
-
- scheduledTasks, ok := <-c.scheduledTasksCh
-
- if !ok {
- return []scheduledTask{}, nil
- }
+const SCHEDULED_TASK_PROGRAM_ID = "Schedule.Service.1"
- return scheduledTasks.tasks, scheduledTasks.err
-}
+// S_FALSE is returned by CoInitialize if it was already called on this thread.
+const S_FALSE = 0x00000001
-func (c *Collector) initializeScheduleService(initErrCh chan<- error) {
- service := schedule_service.New()
- if err := service.Connect(); err != nil {
- initErrCh <- fmt.Errorf("failed to connect to schedule service: %w", err)
+func getScheduledTasks() (ScheduledTasks, error) {
+ var scheduledTasks ScheduledTasks
- return
- }
+ // COM calls, including these Task Scheduler queries, are only thread-safe when
+ // the CoInitialize[Ex]() call is bound to its current OS thread.
+ // Otherwise, attempting to initialize and run parallel queries across
+ // goroutines will result in protected memory errors.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
- defer service.Close()
-
- errs := make([]error, 0, workerCount)
-
- for range workerCount {
- errCh := make(chan error, workerCount)
-
- go c.collectWorker(errCh)
-
- if err := <-errCh; err != nil {
- errs = append(errs, err)
+ if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
+ var oleCode *ole.OleError
+ if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != S_FALSE {
+ return nil, err
}
}
+ defer ole.CoUninitialize()
- if err := errors.Join(errs...); err != nil {
- initErrCh <- err
-
- return
- }
-
- close(initErrCh)
-
- taskServiceObj := service.GetOLETaskServiceObj()
- scheduledTasks := make([]scheduledTask, 0, 500)
-
- for range c.scheduledTasksReqCh {
- func() {
- // Clear the slice to avoid memory leaks
- clear(scheduledTasks)
- scheduledTasks = scheduledTasks[:0]
-
- res, err := oleutil.CallMethod(taskServiceObj, "GetFolder", `\`)
- if err != nil {
- c.scheduledTasksCh <- scheduledTaskResults{err: err}
- }
-
- rootFolderObj := res.ToIDispatch()
- defer rootFolderObj.Release()
-
- errs := make([]error, 0)
- scheduledTasksWorkerResults := make(chan scheduledTaskResults)
-
- wg := &sync.WaitGroup{}
-
- go func() {
- for workerResults := range scheduledTasksWorkerResults {
- wg.Done()
-
- if workerResults.err != nil {
- errs = append(errs, workerResults.err)
- }
-
- if workerResults.tasks != nil {
- errs = append(errs, workerResults.err)
-
- scheduledTasks = append(scheduledTasks, workerResults.tasks...)
- }
- }
- }()
-
- if err := c.fetchRecursively(rootFolderObj, wg, scheduledTasksWorkerResults); err != nil {
- errs = append(errs, err)
- }
-
- wg.Wait()
-
- close(scheduledTasksWorkerResults)
-
- c.scheduledTasksCh <- scheduledTaskResults{tasks: scheduledTasks, err: errors.Join(errs...)}
- }()
+ schedClassID, err := ole.ClassIDFrom(SCHEDULED_TASK_PROGRAM_ID)
+ if err != nil {
+ return scheduledTasks, err
}
- close(c.scheduledTasksCh)
- close(c.scheduledTasksWorker)
-
- c.scheduledTasksCh = nil
- c.scheduledTasksWorker = nil
-}
-
-func (c *Collector) collectWorker(errCh chan<- error) {
- defer func() {
- if r := recover(); r != nil {
- c.logger.Error("worker panic",
- slog.Any("panic", r),
- )
-
- errCh := make(chan error, 1)
- // Restart the collectWorker
- go c.collectWorker(errCh)
-
- if err := <-errCh; err != nil {
- c.logger.Error("failed to restart worker",
- slog.Any("err", err),
- )
- }
- }
- }()
-
- service := schedule_service.New()
- if err := service.Connect(); err != nil {
- errCh <- fmt.Errorf("failed to connect to schedule service: %w", err)
-
- return
+ taskSchedulerObj, err := ole.CreateInstance(schedClassID, nil)
+ if err != nil || taskSchedulerObj == nil {
+ return scheduledTasks, err
}
+ defer taskSchedulerObj.Release()
- close(errCh)
-
- defer service.Close()
-
- taskServiceObj := service.GetOLETaskServiceObj()
+ taskServiceObj := taskSchedulerObj.MustQueryInterface(ole.IID_IDispatch)
- for task := range c.scheduledTasksWorker {
- scheduledTasks, err := fetchTasksInFolder(taskServiceObj, task.folderPath)
-
- task.results <- scheduledTaskResults{tasks: scheduledTasks, err: err}
- }
-}
-
-func (c *Collector) fetchRecursively(folder *ole.IDispatch, wg *sync.WaitGroup, results chan<- scheduledTaskResults) error {
- folderPathVariant, err := oleutil.GetProperty(folder, "Path")
+ _, err = oleutil.CallMethod(taskServiceObj, "Connect")
if err != nil {
- return fmt.Errorf("failed to get folder path: %w", err)
+ return scheduledTasks, err
}
- folderPath := folderPathVariant.ToString()
+ defer taskServiceObj.Release()
- wg.Add(1)
- c.scheduledTasksWorker <- scheduledTaskWorkerRequest{folderPath: folderPath, results: results}
-
- res, err := oleutil.CallMethod(folder, "GetFolders", 1)
+ res, err := oleutil.CallMethod(taskServiceObj, "GetFolder", `\`)
if err != nil {
- return err
+ return scheduledTasks, err
}
- subFolders := res.ToIDispatch()
- defer subFolders.Release()
+ rootFolderObj := res.ToIDispatch()
+ defer rootFolderObj.Release()
- return oleutil.ForEach(subFolders, func(v *ole.VARIANT) error {
- subFolder := v.ToIDispatch()
- defer subFolder.Release()
+ err = fetchTasksRecursively(rootFolderObj, &scheduledTasks)
- return c.fetchRecursively(subFolder, wg, results)
- })
+ return scheduledTasks, err
}
-func fetchTasksInFolder(taskServiceObj *ole.IDispatch, folderPath string) ([]scheduledTask, error) {
- folderObjRes, err := oleutil.CallMethod(taskServiceObj, "GetFolder", folderPath)
+func fetchTasksInFolder(folder *ole.IDispatch, scheduledTasks *ScheduledTasks) error {
+ res, err := oleutil.CallMethod(folder, "GetTasks", 1)
if err != nil {
- return nil, fmt.Errorf("failed to get folder %s: %w", folderPath, err)
- }
-
- folderObj := folderObjRes.ToIDispatch()
- defer folderObj.Release()
-
- tasksRes, err := oleutil.CallMethod(folderObj, "GetTasks", 1)
- if err != nil {
- return nil, fmt.Errorf("failed to get tasks in folder %s: %w", folderPath, err)
+ return err
}
- tasks := tasksRes.ToIDispatch()
+ tasks := res.ToIDispatch()
defer tasks.Release()
- // Get task count
- countVariant, err := oleutil.GetProperty(tasks, "Count")
- if err != nil {
- return nil, fmt.Errorf("failed to get task count: %w", err)
- }
-
- taskCount := int(countVariant.Val)
-
- scheduledTasks := make([]scheduledTask, 0, taskCount)
-
err = oleutil.ForEach(tasks, func(v *ole.VARIANT) error {
task := v.ToIDispatch()
defer task.Release()
@@ -466,19 +307,39 @@ func fetchTasksInFolder(taskServiceObj *ole.IDispatch, folderPath string) ([]sch
return err
}
- scheduledTasks = append(scheduledTasks, parsedTask)
+ *scheduledTasks = append(*scheduledTasks, parsedTask)
return nil
})
+
+ return err
+}
+
+func fetchTasksRecursively(folder *ole.IDispatch, scheduledTasks *ScheduledTasks) error {
+ if err := fetchTasksInFolder(folder, scheduledTasks); err != nil {
+ return err
+ }
+
+ res, err := oleutil.CallMethod(folder, "GetFolders", 1)
if err != nil {
- return nil, fmt.Errorf("failed to iterate over tasks: %w", err)
+ return err
}
- return scheduledTasks, nil
+ subFolders := res.ToIDispatch()
+ defer subFolders.Release()
+
+ err = oleutil.ForEach(subFolders, func(v *ole.VARIANT) error {
+ subFolder := v.ToIDispatch()
+ defer subFolder.Release()
+
+ return fetchTasksRecursively(subFolder, scheduledTasks)
+ })
+
+ return err
}
-func parseTask(task *ole.IDispatch) (scheduledTask, error) {
- var scheduledTask scheduledTask
+func parseTask(task *ole.IDispatch) (ScheduledTask, error) {
+ var scheduledTask ScheduledTask
taskNameVar, err := oleutil.GetProperty(task, "Name")
if err != nil {
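
The scheduled_task rewrite above drops the worker pool and returns to the straightforward go-ole prologue: pin the goroutine to its OS thread, initialize COM, and treat S_FALSE (already initialized) as success. A trimmed-down sketch of that prologue under the same assumptions, with no Task Scheduler specifics:

```go
package main

import (
	"errors"
	"fmt"
	"runtime"

	"github.com/go-ole/go-ole"
)

// sFalse is the HRESULT CoInitialize[Ex] returns when COM was already
// initialized on this thread; it is not an error for our purposes.
const sFalse = 0x00000001

// withCOM runs fn with COM initialized on a pinned OS thread, mirroring the
// prologue of getScheduledTasks above.
func withCOM(fn func() error) error {
	// COM initialization is per OS thread, so the goroutine must stay on the
	// thread that called CoInitializeEx for the duration of the COM calls.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		var oleErr *ole.OleError
		if errors.As(err, &oleErr) && oleErr.Code() != ole.S_OK && oleErr.Code() != sFalse {
			return err
		}
	}
	defer ole.CoUninitialize()

	return fn()
}

func main() {
	err := withCOM(func() error {
		// COM objects (e.g. "Schedule.Service.1") would be created and used here.
		return nil
	})

	fmt.Println("err:", err)
}
```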
diff --git a/internal/collector/service/service.go b/internal/collector/service/service.go
index 9bfd460ba..39ca25703 100644
--- a/internal/collector/service/service.go
+++ b/internal/collector/service/service.go
@@ -16,6 +16,7 @@
package service
import (
+ "context"
"errors"
"fmt"
"log/slog"
@@ -211,12 +212,6 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
return fmt.Errorf("failed to query services: %w", err)
}
- if len(services) == 0 {
- c.logger.Warn("No services queried")
-
- return nil
- }
-
servicesCh := make(chan windows.ENUM_SERVICE_STATUS_PROCESS, len(services))
wg := sync.WaitGroup{}
wg.Add(len(services))
@@ -249,7 +244,7 @@ func (c *Collector) collectWorker(ch chan<- prometheus.Metric, service windows.E
}
if err := c.collectService(ch, serviceName, service); err != nil {
- c.logger.Warn("failed collecting service info",
+ c.logger.Log(context.Background(), slog.LevelWarn, "failed collecting service info",
slog.Any("err", err),
slog.String("service", serviceName),
)
@@ -267,7 +262,7 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri
serviceManager := &mgr.Service{Name: serviceName, Handle: serviceHandle}
defer func(serviceManager *mgr.Service) {
if err := serviceManager.Close(); err != nil {
- c.logger.Warn("failed to close service handle",
+ c.logger.Log(context.Background(), slog.LevelWarn, "failed to close service handle",
slog.Any("err", err),
slog.String("service", serviceName),
)
@@ -281,7 +276,7 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri
return fmt.Errorf("failed to get service configuration: %w", err)
}
- c.logger.Debug("failed collecting service config",
+ c.logger.Log(context.Background(), slog.LevelDebug, "failed collecting service config",
slog.Any("err", err),
slog.String("service", serviceName),
)
@@ -350,18 +345,17 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri
return nil
}
+ logLevel := slog.LevelWarn
+
if errors.Is(err, windows.ERROR_ACCESS_DENIED) {
- c.logger.Debug("failed to get process start time",
- slog.String("service", serviceName),
- slog.Any("err", err),
- )
- } else {
- c.logger.Warn("failed to get process start time",
- slog.String("service", serviceName),
- slog.Any("err", err),
- )
+ logLevel = slog.LevelDebug
}
+ c.logger.Log(context.Background(), logLevel, "failed to get process start time",
+ slog.String("service", serviceName),
+ slog.Any("err", err),
+ )
+
return nil
}
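
The service collector change collapses the duplicated Warn/Debug branches into a single logger.Log call by choosing the level up front, which slog supports because the level is an ordinary value. A minimal, self-contained version of that pattern (errAccessDenied stands in for windows.ERROR_ACCESS_DENIED):

```go
package main

import (
	"context"
	"errors"
	"log/slog"
	"os"
)

// errAccessDenied stands in for windows.ERROR_ACCESS_DENIED.
var errAccessDenied = errors.New("access is denied")

func logStartTimeFailure(logger *slog.Logger, service string, err error) {
	// Expected permission errors are noisy, so they are downgraded to debug;
	// everything else stays at warn, same shape as the service collector above.
	logLevel := slog.LevelWarn
	if errors.Is(err, errAccessDenied) {
		logLevel = slog.LevelDebug
	}

	logger.Log(context.Background(), logLevel, "failed to get process start time",
		slog.String("service", service),
		slog.Any("err", err),
	)
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	logStartTimeFailure(logger, "wuauserv", errAccessDenied)
}
```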
diff --git a/internal/collector/smb/smb.go b/internal/collector/smb/smb.go
index 47979fdf0..86eb53fbe 100644
--- a/internal/collector/smb/smb.go
+++ b/internal/collector/smb/smb.go
@@ -21,7 +21,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -36,7 +36,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
treeConnectCount *prometheus.Desc
currentOpenFileCount *prometheus.Desc
@@ -77,16 +78,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("SMB Server Shares", perfdata.InstancesAll, []string{
- currentOpenFileCount,
- treeConnectCount,
- receivedBytes,
- writeRequests,
- readRequests,
- metadataRequests,
- sentBytes,
- filesOpened,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("SMB Server Shares", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
}
@@ -145,66 +137,66 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect collects smb metrics and sends them to prometheus.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect SMB Server Shares metrics: %w", err)
}
- for share, data := range perfData {
+ for _, data := range c.perfDataObject {
ch <- prometheus.MustNewConstMetric(
c.currentOpenFileCount,
prometheus.CounterValue,
- data[currentOpenFileCount].FirstValue,
- share,
+ data.CurrentOpenFileCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.treeConnectCount,
prometheus.CounterValue,
- data[treeConnectCount].FirstValue,
- share,
+ data.TreeConnectCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.receivedBytes,
prometheus.CounterValue,
- data[receivedBytes].FirstValue,
- share,
+ data.ReceivedBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeRequests,
prometheus.CounterValue,
- data[writeRequests].FirstValue,
- share,
+ data.WriteRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readRequests,
prometheus.CounterValue,
- data[readRequests].FirstValue,
- share,
+ data.ReadRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.metadataRequests,
prometheus.CounterValue,
- data[metadataRequests].FirstValue,
- share,
+ data.MetadataRequests,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.sentBytes,
prometheus.CounterValue,
- data[sentBytes].FirstValue,
- share,
+ data.SentBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.filesOpened,
prometheus.CounterValue,
- data[filesOpened].FirstValue,
- share,
+ data.FilesOpened,
+ data.Name,
)
}
diff --git a/internal/collector/smb/const.go b/internal/collector/smb/types.go
similarity index 53%
rename from internal/collector/smb/const.go
rename to internal/collector/smb/types.go
index 2776856bc..f36f934b1 100644
--- a/internal/collector/smb/const.go
+++ b/internal/collector/smb/types.go
@@ -15,13 +15,15 @@
package smb
-const (
- currentOpenFileCount = "Current Open File Count"
- treeConnectCount = "Tree Connect Count"
- receivedBytes = "Received Bytes/sec"
- writeRequests = "Write Requests/sec"
- readRequests = "Read Requests/sec"
- metadataRequests = "Metadata Requests/sec"
- sentBytes = "Sent Bytes/sec"
- filesOpened = "Files Opened/sec"
-)
+type perfDataCounterValues struct {
+ Name string
+
+ CurrentOpenFileCount float64 `perfdata:"Current Open File Count"`
+ TreeConnectCount float64 `perfdata:"Tree Connect Count"`
+ ReceivedBytes float64 `perfdata:"Received Bytes/sec"`
+ WriteRequests float64 `perfdata:"Write Requests/sec"`
+ ReadRequests float64 `perfdata:"Read Requests/sec"`
+ MetadataRequests float64 `perfdata:"Metadata Requests/sec"`
+ SentBytes float64 `perfdata:"Sent Bytes/sec"`
+ FilesOpened float64 `perfdata:"Files Opened/sec"`
+}
diff --git a/internal/collector/smbclient/const.go b/internal/collector/smbclient/const.go
deleted file mode 100644
index e39fe98b5..000000000
--- a/internal/collector/smbclient/const.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package smbclient
-
-const (
- AvgDataQueueLength = "Avg. Data Queue Length"
- AvgReadQueueLength = "Avg. Read Queue Length"
- AvgSecPerRead = "Avg. sec/Read"
- AvgSecPerWrite = "Avg. sec/Write"
- AvgSecPerDataRequest = "Avg. sec/Data Request"
- AvgWriteQueueLength = "Avg. Write Queue Length"
- CreditStallsPerSec = "Credit Stalls/sec"
- CurrentDataQueueLength = "Current Data Queue Length"
- DataBytesPerSec = "Data Bytes/sec"
- DataRequestsPerSec = "Data Requests/sec"
- MetadataRequestsPerSec = "Metadata Requests/sec"
- ReadBytesTransmittedViaSMBDirectPerSec = "Read Bytes transmitted via SMB Direct/sec"
- ReadBytesPerSec = "Read Bytes/sec"
- ReadRequestsTransmittedViaSMBDirectPerSec = "Read Requests transmitted via SMB Direct/sec"
- ReadRequestsPerSec = "Read Requests/sec"
- TurboIOReadsPerSec = "Turbo I/O Reads/sec"
- TurboIOWritesPerSec = "Turbo I/O Writes/sec"
- WriteBytesTransmittedViaSMBDirectPerSec = "Write Bytes transmitted via SMB Direct/sec"
- WriteBytesPerSec = "Write Bytes/sec"
- WriteRequestsTransmittedViaSMBDirectPerSec = "Write Requests transmitted via SMB Direct/sec"
- WriteRequestsPerSec = "Write Requests/sec"
-)
diff --git a/internal/collector/smbclient/smbclient.go b/internal/collector/smbclient/smbclient.go
index 945c6ba26..3c514aaa3 100644
--- a/internal/collector/smbclient/smbclient.go
+++ b/internal/collector/smbclient/smbclient.go
@@ -22,7 +22,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -39,7 +39,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
readBytesTotal *prometheus.Desc
readBytesTransmittedViaSMBDirectTotal *prometheus.Desc
@@ -92,29 +93,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("SMB Client Shares", perfdata.InstancesAll, []string{
- AvgDataQueueLength,
- AvgReadQueueLength,
- AvgSecPerRead,
- AvgSecPerWrite,
- AvgSecPerDataRequest,
- AvgWriteQueueLength,
- CreditStallsPerSec,
- CurrentDataQueueLength,
- DataBytesPerSec,
- DataRequestsPerSec,
- MetadataRequestsPerSec,
- ReadBytesTransmittedViaSMBDirectPerSec,
- ReadBytesPerSec,
- ReadRequestsTransmittedViaSMBDirectPerSec,
- ReadRequestsPerSec,
- TurboIOReadsPerSec,
- TurboIOWritesPerSec,
- WriteBytesTransmittedViaSMBDirectPerSec,
- WriteBytesPerSec,
- WriteRequestsTransmittedViaSMBDirectPerSec,
- WriteRequestsPerSec,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("SMB Client Shares", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
}
@@ -219,13 +198,17 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect collects smb client metrics and sends them to prometheus.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect SMB Client Shares metrics: %w", err)
}
- for name, data := range perfData {
- parsed := strings.FieldsFunc(name, func(r rune) bool { return r == '\\' })
+ for _, data := range c.perfDataObject {
+ parsed := strings.FieldsFunc(data.Name, func(r rune) bool { return r == '\\' })
+ if len(parsed) != 2 {
+ return fmt.Errorf("unexpected number of fields in SMB Client Shares instance name: %q", data.Name)
+ }
+
serverValue := parsed[0]
shareValue := parsed[1]
@@ -233,7 +216,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.requestQueueSecsTotal,
prometheus.CounterValue,
- data[AvgDataQueueLength].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgDataQueueLength*pdh.TicksToSecondScaleFactor,
serverValue, shareValue,
)
@@ -241,28 +224,28 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.readRequestQueueSecsTotal,
prometheus.CounterValue,
- data[AvgReadQueueLength].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgReadQueueLength*pdh.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.readSecsTotal,
prometheus.CounterValue,
- data[AvgSecPerRead].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgSecPerRead*pdh.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.writeSecsTotal,
prometheus.CounterValue,
- data[AvgSecPerWrite].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgSecPerWrite*pdh.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.requestSecs,
prometheus.CounterValue,
- data[AvgSecPerDataRequest].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgSecPerDataRequest*pdh.TicksToSecondScaleFactor,
serverValue, shareValue,
)
@@ -270,112 +253,112 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.writeRequestQueueSecsTotal,
prometheus.CounterValue,
- data[AvgWriteQueueLength].FirstValue*perfdata.TicksToSecondScaleFactor,
+ data.AvgWriteQueueLength*pdh.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.creditStallsTotal,
prometheus.CounterValue,
- data[CreditStallsPerSec].FirstValue,
+ data.CreditStallsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.currentDataQueued,
prometheus.GaugeValue,
- data[CurrentDataQueueLength].FirstValue,
+ data.CurrentDataQueueLength,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.dataBytesTotal,
prometheus.CounterValue,
- data[DataBytesPerSec].FirstValue,
+ data.DataBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.dataRequestsTotal,
prometheus.CounterValue,
- data[DataRequestsPerSec].FirstValue,
+ data.DataRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.metadataRequestsTotal,
prometheus.CounterValue,
- data[MetadataRequestsPerSec].FirstValue,
+ data.MetadataRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.readBytesTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
- data[ReadBytesTransmittedViaSMBDirectPerSec].FirstValue,
+ data.ReadBytesTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.readBytesTotal,
prometheus.CounterValue,
- data[ReadBytesPerSec].FirstValue,
+ data.ReadBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.readRequestsTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
- data[ReadRequestsTransmittedViaSMBDirectPerSec].FirstValue,
+ data.ReadRequestsTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.readsTotal,
prometheus.CounterValue,
- data[ReadRequestsPerSec].FirstValue,
+ data.ReadRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.turboIOReadsTotal,
prometheus.CounterValue,
- data[TurboIOReadsPerSec].FirstValue,
+ data.TurboIOReadsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.TurboIOWritesTotal,
prometheus.CounterValue,
- data[TurboIOWritesPerSec].FirstValue,
+ data.TurboIOWritesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.writeBytesTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
- data[WriteBytesTransmittedViaSMBDirectPerSec].FirstValue,
+ data.WriteBytesTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.writeBytesTotal,
prometheus.CounterValue,
- data[WriteBytesPerSec].FirstValue,
+ data.WriteBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.writeRequestsTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
- data[WriteRequestsTransmittedViaSMBDirectPerSec].FirstValue,
+ data.WriteRequestsTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.writesTotal,
prometheus.CounterValue,
- data[WriteRequestsPerSec].FirstValue,
+ data.WriteRequestsPerSec,
serverValue, shareValue,
)
}
diff --git a/internal/collector/smbclient/types.go b/internal/collector/smbclient/types.go
new file mode 100644
index 000000000..ceb62b6fd
--- /dev/null
+++ b/internal/collector/smbclient/types.go
@@ -0,0 +1,42 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package smbclient
+
+type perfDataCounterValues struct {
+ Name string
+
+ AvgDataQueueLength float64 `perfdata:"Avg. Data Queue Length"`
+ AvgReadQueueLength float64 `perfdata:"Avg. Read Queue Length"`
+ AvgSecPerRead float64 `perfdata:"Avg. sec/Read"`
+ AvgSecPerWrite float64 `perfdata:"Avg. sec/Write"`
+ AvgSecPerDataRequest float64 `perfdata:"Avg. sec/Data Request"`
+ AvgWriteQueueLength float64 `perfdata:"Avg. Write Queue Length"`
+ CreditStallsPerSec float64 `perfdata:"Credit Stalls/sec"`
+ CurrentDataQueueLength float64 `perfdata:"Current Data Queue Length"`
+ DataBytesPerSec float64 `perfdata:"Data Bytes/sec"`
+ DataRequestsPerSec float64 `perfdata:"Data Requests/sec"`
+ MetadataRequestsPerSec float64 `perfdata:"Metadata Requests/sec"`
+ ReadBytesTransmittedViaSMBDirectPerSec float64 `perfdata:"Read Bytes transmitted via SMB Direct/sec"`
+ ReadBytesPerSec float64 `perfdata:"Read Bytes/sec"`
+ ReadRequestsTransmittedViaSMBDirectPerSec float64 `perfdata:"Read Requests transmitted via SMB Direct/sec"`
+ ReadRequestsPerSec float64 `perfdata:"Read Requests/sec"`
+ TurboIOReadsPerSec float64 `perfdata:"Turbo I/O Reads/sec"`
+ TurboIOWritesPerSec float64 `perfdata:"Turbo I/O Writes/sec"`
+ WriteBytesTransmittedViaSMBDirectPerSec float64 `perfdata:"Write Bytes transmitted via SMB Direct/sec"`
+ WriteBytesPerSec float64 `perfdata:"Write Bytes/sec"`
+ WriteRequestsTransmittedViaSMBDirectPerSec float64 `perfdata:"Write Requests transmitted via SMB Direct/sec"`
+ WriteRequestsPerSec float64 `perfdata:"Write Requests/sec"`
+}
diff --git a/internal/collector/smtp/const.go b/internal/collector/smtp/const.go
deleted file mode 100644
index f8dcfc251..000000000
--- a/internal/collector/smtp/const.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package smtp
-
-const (
- badmailedMessagesBadPickupFileTotal = "Badmailed Messages (Bad Pickup File)"
- badmailedMessagesGeneralFailureTotal = "Badmailed Messages (General Failure)"
- badmailedMessagesHopCountExceededTotal = "Badmailed Messages (Hop Count Exceeded)"
- badmailedMessagesNDROfDSNTotal = "Badmailed Messages (NDR of DSN)"
- badmailedMessagesNoRecipientsTotal = "Badmailed Messages (No Recipients)"
- badmailedMessagesTriggeredViaEventTotal = "Badmailed Messages (Triggered via Event)"
- bytesSentTotal = "Bytes Sent Total"
- bytesReceivedTotal = "Bytes Received Total"
- categorizerQueueLength = "Categorizer Queue Length"
- connectionErrorsTotal = "Total Connection Errors"
- currentMessagesInLocalDelivery = "Current Messages in Local Delivery"
- directoryDropsTotal = "Directory Drops Total"
- dnsQueriesTotal = "DNS Queries Total"
- dsnFailuresTotal = "Total DSN Failures"
- etrnMessagesTotal = "ETRN Messages Total"
- inboundConnectionsCurrent = "Inbound Connections Current"
- inboundConnectionsTotal = "Inbound Connections Total"
- localQueueLength = "Local Queue Length"
- localRetryQueueLength = "Local Retry Queue Length"
- mailFilesOpen = "Number of MailFiles Open"
- messageBytesReceivedTotal = "Message Bytes Received Total"
- messageBytesSentTotal = "Message Bytes Sent Total"
- messageDeliveryRetriesTotal = "Message Delivery Retries"
- messageSendRetriesTotal = "Message Send Retries"
- messagesCurrentlyUndeliverable = "Messages Currently Undeliverable"
- messagesDeliveredTotal = "Messages Delivered Total"
- messagesPendingRouting = "Messages Pending Routing"
- messagesReceivedTotal = "Messages Received Total"
- messagesRefusedForAddressObjectsTotal = "Messages Refused for Address Objects"
- messagesRefusedForMailObjectsTotal = "Messages Refused for Mail Objects"
- messagesRefusedForSizeTotal = "Messages Refused for Size"
- messagesSentTotal = "Messages Sent Total"
- messagesSubmittedTotal = "Total messages submitted"
- ndrsGeneratedTotal = "NDRs Generated"
- outboundConnectionsCurrent = "Outbound Connections Current"
- outboundConnectionsRefusedTotal = "Outbound Connections Refused"
- outboundConnectionsTotal = "Outbound Connections Total"
- queueFilesOpen = "Number of QueueFiles Open"
- pickupDirectoryMessagesRetrievedTotal = "Pickup Directory Messages Retrieved Total"
- remoteQueueLength = "Remote Queue Length"
- remoteRetryQueueLength = "Remote Retry Queue Length"
- routingTableLookupsTotal = "Routing Table Lookups Total"
-)
diff --git a/internal/collector/smtp/smtp.go b/internal/collector/smtp/smtp.go
index e8a89556c..0c2c984b6 100644
--- a/internal/collector/smtp/smtp.go
+++ b/internal/collector/smtp/smtp.go
@@ -22,7 +22,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -43,7 +43,8 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
badMailedMessagesBadPickupFileTotal *prometheus.Desc
badMailedMessagesGeneralFailureTotal *prometheus.Desc
@@ -158,50 +159,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("SMTP Server", perfdata.InstancesAll, []string{
- badmailedMessagesBadPickupFileTotal,
- badmailedMessagesGeneralFailureTotal,
- badmailedMessagesHopCountExceededTotal,
- badmailedMessagesNDROfDSNTotal,
- badmailedMessagesNoRecipientsTotal,
- badmailedMessagesTriggeredViaEventTotal,
- bytesSentTotal,
- bytesReceivedTotal,
- categorizerQueueLength,
- connectionErrorsTotal,
- currentMessagesInLocalDelivery,
- directoryDropsTotal,
- dnsQueriesTotal,
- dsnFailuresTotal,
- etrnMessagesTotal,
- inboundConnectionsCurrent,
- inboundConnectionsTotal,
- localQueueLength,
- localRetryQueueLength,
- mailFilesOpen,
- messageBytesReceivedTotal,
- messageBytesSentTotal,
- messageDeliveryRetriesTotal,
- messageSendRetriesTotal,
- messagesCurrentlyUndeliverable,
- messagesDeliveredTotal,
- messagesPendingRouting,
- messagesReceivedTotal,
- messagesRefusedForAddressObjectsTotal,
- messagesRefusedForMailObjectsTotal,
- messagesRefusedForSizeTotal,
- messagesSentTotal,
- messagesSubmittedTotal,
- ndrsGeneratedTotal,
- outboundConnectionsCurrent,
- outboundConnectionsRefusedTotal,
- outboundConnectionsTotal,
- queueFilesOpen,
- pickupDirectoryMessagesRetrievedTotal,
- remoteQueueLength,
- remoteRetryQueueLength,
- routingTableLookupsTotal,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("SMTP Server", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMTP Server collector: %w", err)
}
@@ -469,302 +427,302 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect SMTP Server metrics: %w", err)
}
- for name, server := range perfData {
- if c.config.ServerExclude.MatchString(name) ||
- !c.config.ServerInclude.MatchString(name) {
+ for _, data := range c.perfDataObject {
+ if c.config.ServerExclude.MatchString(data.Name) ||
+ !c.config.ServerInclude.MatchString(data.Name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.badMailedMessagesBadPickupFileTotal,
prometheus.CounterValue,
- server[badmailedMessagesBadPickupFileTotal].FirstValue,
- name,
+ data.BadmailedMessagesBadPickupFileTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.badMailedMessagesHopCountExceededTotal,
prometheus.CounterValue,
- server[badmailedMessagesHopCountExceededTotal].FirstValue,
- name,
+ data.BadmailedMessagesHopCountExceededTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.badMailedMessagesNDROfDSNTotal,
prometheus.CounterValue,
- server[badmailedMessagesNDROfDSNTotal].FirstValue,
- name,
+ data.BadmailedMessagesNDROfDSNTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.badMailedMessagesNoRecipientsTotal,
prometheus.CounterValue,
- server[badmailedMessagesNoRecipientsTotal].FirstValue,
- name,
+ data.BadmailedMessagesNoRecipientsTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.badMailedMessagesTriggeredViaEventTotal,
prometheus.CounterValue,
- server[badmailedMessagesTriggeredViaEventTotal].FirstValue,
- name,
+ data.BadmailedMessagesTriggeredViaEventTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.bytesSentTotal,
prometheus.CounterValue,
- server[bytesSentTotal].FirstValue,
- name,
+ data.BytesSentTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.bytesReceivedTotal,
prometheus.CounterValue,
- server[bytesReceivedTotal].FirstValue,
- name,
+ data.BytesReceivedTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.categorizerQueueLength,
prometheus.GaugeValue,
- server[categorizerQueueLength].FirstValue,
- name,
+ data.CategorizerQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionErrorsTotal,
prometheus.CounterValue,
- server[connectionErrorsTotal].FirstValue,
- name,
+ data.ConnectionErrorsTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.currentMessagesInLocalDelivery,
prometheus.GaugeValue,
- server[currentMessagesInLocalDelivery].FirstValue,
- name,
+ data.CurrentMessagesInLocalDelivery,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.directoryDropsTotal,
prometheus.CounterValue,
- server[directoryDropsTotal].FirstValue,
- name,
+ data.DirectoryDropsTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dsnFailuresTotal,
prometheus.CounterValue,
- server[dsnFailuresTotal].FirstValue,
- name,
+ data.DsnFailuresTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dnsQueriesTotal,
prometheus.CounterValue,
- server[dnsQueriesTotal].FirstValue,
- name,
+ data.DnsQueriesTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.etrnMessagesTotal,
prometheus.CounterValue,
- server[etrnMessagesTotal].FirstValue,
- name,
+ data.EtrnMessagesTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.inboundConnectionsTotal,
prometheus.CounterValue,
- server[inboundConnectionsTotal].FirstValue,
- name,
+ data.InboundConnectionsTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.inboundConnectionsCurrent,
prometheus.GaugeValue,
- server[inboundConnectionsCurrent].FirstValue,
- name,
+ data.InboundConnectionsCurrent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.localQueueLength,
prometheus.GaugeValue,
- server[localQueueLength].FirstValue,
- name,
+ data.LocalQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.localRetryQueueLength,
prometheus.GaugeValue,
- server[localRetryQueueLength].FirstValue,
- name,
+ data.LocalRetryQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.mailFilesOpen,
prometheus.GaugeValue,
- server[mailFilesOpen].FirstValue,
- name,
+ data.MailFilesOpen,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messageBytesReceivedTotal,
prometheus.CounterValue,
- server[messageBytesReceivedTotal].FirstValue,
- name,
+ data.MessageBytesReceivedTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messageBytesSentTotal,
prometheus.CounterValue,
- server[messageBytesSentTotal].FirstValue,
- name,
+ data.MessageBytesSentTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messageDeliveryRetriesTotal,
prometheus.CounterValue,
- server[messageDeliveryRetriesTotal].FirstValue,
- name,
+ data.MessageDeliveryRetriesTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messageSendRetriesTotal,
prometheus.CounterValue,
- server[messageSendRetriesTotal].FirstValue,
- name,
+ data.MessageSendRetriesTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesCurrentlyUndeliverable,
prometheus.GaugeValue,
- server[messagesCurrentlyUndeliverable].FirstValue,
- name,
+ data.MessagesCurrentlyUndeliverable,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesDeliveredTotal,
prometheus.CounterValue,
- server[messagesDeliveredTotal].FirstValue,
- name,
+ data.MessagesDeliveredTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesPendingRouting,
prometheus.GaugeValue,
- server[messagesPendingRouting].FirstValue,
- name,
+ data.MessagesPendingRouting,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesReceivedTotal,
prometheus.CounterValue,
- server[messagesReceivedTotal].FirstValue,
- name,
+ data.MessagesReceivedTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesRefusedForAddressObjectsTotal,
prometheus.CounterValue,
- server[messagesRefusedForAddressObjectsTotal].FirstValue,
- name,
+ data.MessagesRefusedForAddressObjectsTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesRefusedForMailObjectsTotal,
prometheus.CounterValue,
- server[messagesRefusedForMailObjectsTotal].FirstValue,
- name,
+ data.MessagesRefusedForMailObjectsTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesRefusedForSizeTotal,
prometheus.CounterValue,
- server[messagesRefusedForSizeTotal].FirstValue,
- name,
+ data.MessagesRefusedForSizeTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesSentTotal,
prometheus.CounterValue,
- server[messagesSentTotal].FirstValue,
- name,
+ data.MessagesSentTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.messagesSubmittedTotal,
prometheus.CounterValue,
- server[messagesSubmittedTotal].FirstValue,
- name,
+ data.MessagesSubmittedTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ndrsGeneratedTotal,
prometheus.CounterValue,
- server[ndrsGeneratedTotal].FirstValue,
- name,
+ data.NdrsGeneratedTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.outboundConnectionsCurrent,
prometheus.GaugeValue,
- server[outboundConnectionsCurrent].FirstValue,
- name,
+ data.OutboundConnectionsCurrent,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.outboundConnectionsRefusedTotal,
prometheus.CounterValue,
- server[outboundConnectionsRefusedTotal].FirstValue,
- name,
+ data.OutboundConnectionsRefusedTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.outboundConnectionsTotal,
prometheus.CounterValue,
- server[outboundConnectionsTotal].FirstValue,
- name,
+ data.OutboundConnectionsTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.queueFilesOpen,
prometheus.GaugeValue,
- server[queueFilesOpen].FirstValue,
- name,
+ data.QueueFilesOpen,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.pickupDirectoryMessagesRetrievedTotal,
prometheus.CounterValue,
- server[pickupDirectoryMessagesRetrievedTotal].FirstValue,
- name,
+ data.PickupDirectoryMessagesRetrievedTotal,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.remoteQueueLength,
prometheus.GaugeValue,
- server[remoteQueueLength].FirstValue,
- name,
+ data.RemoteQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.remoteRetryQueueLength,
prometheus.GaugeValue,
- server[remoteRetryQueueLength].FirstValue,
- name,
+ data.RemoteRetryQueueLength,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.routingTableLookupsTotal,
prometheus.CounterValue,
- server[routingTableLookupsTotal].FirstValue,
- name,
+ data.RoutingTableLookupsTotal,
+ data.Name,
)
}
diff --git a/internal/collector/smtp/types.go b/internal/collector/smtp/types.go
new file mode 100644
index 000000000..d47b105cf
--- /dev/null
+++ b/internal/collector/smtp/types.go
@@ -0,0 +1,63 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package smtp
+
+type perfDataCounterValues struct {
+ Name string
+
+ BadmailedMessagesBadPickupFileTotal float64 `perfdata:"Badmailed Messages (Bad Pickup File)"`
+ BadmailedMessagesGeneralFailureTotal float64 `perfdata:"Badmailed Messages (General Failure)"`
+ BadmailedMessagesHopCountExceededTotal float64 `perfdata:"Badmailed Messages (Hop Count Exceeded)"`
+ BadmailedMessagesNDROfDSNTotal float64 `perfdata:"Badmailed Messages (NDR of DSN)"`
+ BadmailedMessagesNoRecipientsTotal float64 `perfdata:"Badmailed Messages (No Recipients)"`
+ BadmailedMessagesTriggeredViaEventTotal float64 `perfdata:"Badmailed Messages (Triggered via Event)"`
+ BytesSentTotal float64 `perfdata:"Bytes Sent Total"`
+ BytesReceivedTotal float64 `perfdata:"Bytes Received Total"`
+ CategorizerQueueLength float64 `perfdata:"Categorizer Queue Length"`
+ ConnectionErrorsTotal float64 `perfdata:"Total Connection Errors"`
+ CurrentMessagesInLocalDelivery float64 `perfdata:"Current Messages in Local Delivery"`
+ DirectoryDropsTotal float64 `perfdata:"Directory Drops Total"`
+ DnsQueriesTotal float64 `perfdata:"DNS Queries Total"`
+ DsnFailuresTotal float64 `perfdata:"Total DSN Failures"`
+ EtrnMessagesTotal float64 `perfdata:"ETRN Messages Total"`
+ InboundConnectionsCurrent float64 `perfdata:"Inbound Connections Current"`
+ InboundConnectionsTotal float64 `perfdata:"Inbound Connections Total"`
+ LocalQueueLength float64 `perfdata:"Local Queue Length"`
+ LocalRetryQueueLength float64 `perfdata:"Local Retry Queue Length"`
+ MailFilesOpen float64 `perfdata:"Number of MailFiles Open"`
+ MessageBytesReceivedTotal float64 `perfdata:"Message Bytes Received Total"`
+ MessageBytesSentTotal float64 `perfdata:"Message Bytes Sent Total"`
+ MessageDeliveryRetriesTotal float64 `perfdata:"Message Delivery Retries"`
+ MessageSendRetriesTotal float64 `perfdata:"Message Send Retries"`
+ MessagesCurrentlyUndeliverable float64 `perfdata:"Messages Currently Undeliverable"`
+ MessagesDeliveredTotal float64 `perfdata:"Messages Delivered Total"`
+ MessagesPendingRouting float64 `perfdata:"Messages Pending Routing"`
+ MessagesReceivedTotal float64 `perfdata:"Messages Received Total"`
+ MessagesRefusedForAddressObjectsTotal float64 `perfdata:"Messages Refused for Address Objects"`
+ MessagesRefusedForMailObjectsTotal float64 `perfdata:"Messages Refused for Mail Objects"`
+ MessagesRefusedForSizeTotal float64 `perfdata:"Messages Refused for Size"`
+ MessagesSentTotal float64 `perfdata:"Messages Sent Total"`
+ MessagesSubmittedTotal float64 `perfdata:"Total messages submitted"`
+ NdrsGeneratedTotal float64 `perfdata:"NDRs Generated"`
+ OutboundConnectionsCurrent float64 `perfdata:"Outbound Connections Current"`
+ OutboundConnectionsRefusedTotal float64 `perfdata:"Outbound Connections Refused"`
+ OutboundConnectionsTotal float64 `perfdata:"Outbound Connections Total"`
+ QueueFilesOpen float64 `perfdata:"Number of QueueFiles Open"`
+ PickupDirectoryMessagesRetrievedTotal float64 `perfdata:"Pickup Directory Messages Retrieved Total"`
+ RemoteQueueLength float64 `perfdata:"Remote Queue Length"`
+ RemoteRetryQueueLength float64 `perfdata:"Remote Retry Queue Length"`
+ RoutingTableLookupsTotal float64 `perfdata:"Routing Table Lookups Total"`
+}
diff --git a/internal/collector/system/system.go b/internal/collector/system/system.go
index 4d28e2d91..0557c3da5 100644
--- a/internal/collector/system/system.go
+++ b/internal/collector/system/system.go
@@ -16,13 +16,12 @@
package system
import (
- "errors"
"fmt"
"log/slog"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -38,7 +37,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
contextSwitchesTotal *prometheus.Desc
exceptionDispatchesTotal *prometheus.Desc
@@ -79,15 +79,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("System", nil, []string{
- contextSwitchesPersec,
- exceptionDispatchesPersec,
- processorQueueLength,
- systemCallsPersec,
- systemUpTime,
- processes,
- threads,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("System", nil)
if err != nil {
return fmt.Errorf("failed to create System collector: %w", err)
}
@@ -148,50 +140,45 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect System metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return errors.New("query for System returned empty result set")
- }
-
ch <- prometheus.MustNewConstMetric(
c.contextSwitchesTotal,
prometheus.CounterValue,
- data[contextSwitchesPersec].FirstValue,
+ c.perfDataObject[0].ContextSwitchesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.exceptionDispatchesTotal,
prometheus.CounterValue,
- data[exceptionDispatchesPersec].FirstValue,
+ c.perfDataObject[0].ExceptionDispatchesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.processorQueueLength,
prometheus.GaugeValue,
- data[processorQueueLength].FirstValue,
+ c.perfDataObject[0].ProcessorQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.processes,
prometheus.GaugeValue,
- data[processes].FirstValue,
+ c.perfDataObject[0].Processes,
)
ch <- prometheus.MustNewConstMetric(
c.systemCallsTotal,
prometheus.CounterValue,
- data[systemCallsPersec].FirstValue,
+ c.perfDataObject[0].SystemCallsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.bootTime,
prometheus.GaugeValue,
- data[systemUpTime].FirstValue,
+ c.perfDataObject[0].SystemUpTime,
)
ch <- prometheus.MustNewConstMetric(
c.threads,
prometheus.GaugeValue,
- data[threads].FirstValue,
+ c.perfDataObject[0].Threads,
)
// Windows has no defined limit, and is based off available resources. This currently isn't calculated by WMI and is set to default value.
diff --git a/internal/collector/system/const.go b/internal/collector/system/types.go
similarity index 56%
rename from internal/collector/system/const.go
rename to internal/collector/system/types.go
index 72dbd6654..03c156fa5 100644
--- a/internal/collector/system/const.go
+++ b/internal/collector/system/types.go
@@ -15,12 +15,12 @@
package system
-const (
- contextSwitchesPersec = "Context Switches/sec"
- exceptionDispatchesPersec = "Exception Dispatches/sec"
- processorQueueLength = "Processor Queue Length"
- systemCallsPersec = "System Calls/sec"
- systemUpTime = "System Up Time"
- processes = "Processes"
- threads = "Threads"
-)
+type perfDataCounterValues struct {
+ ContextSwitchesPerSec float64 `perfdata:"Context Switches/sec"`
+ ExceptionDispatchesPerSec float64 `perfdata:"Exception Dispatches/sec"`
+ ProcessorQueueLength float64 `perfdata:"Processor Queue Length"`
+ SystemCallsPerSec float64 `perfdata:"System Calls/sec"`
+ SystemUpTime float64 `perfdata:"System Up Time"`
+ Processes float64 `perfdata:"Processes"`
+ Threads float64 `perfdata:"Threads"`
+}
diff --git a/internal/collector/tcp/tcp.go b/internal/collector/tcp/tcp.go
index 716fa74e0..c9ed665b3 100644
--- a/internal/collector/tcp/tcp.go
+++ b/internal/collector/tcp/tcp.go
@@ -25,7 +25,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/iphlpapi"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
@@ -49,8 +49,10 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
- perfDataCollector4 *perfdata.Collector
- perfDataCollector6 *perfdata.Collector
+ perfDataCollector4 *pdh.Collector
+ perfDataCollector6 *pdh.Collector
+ perfDataObject4 []perfDataCounterValues
+ perfDataObject6 []perfDataCounterValues
connectionFailures *prometheus.Desc
connectionsActive *prometheus.Desc
@@ -116,26 +118,14 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
- counters := []string{
- connectionFailures,
- connectionsActive,
- connectionsEstablished,
- connectionsPassive,
- connectionsReset,
- segmentsPerSec,
- segmentsReceivedPerSec,
- segmentsRetransmittedPerSec,
- segmentsSentPerSec,
- }
-
var err error
- c.perfDataCollector4, err = perfdata.NewCollector("TCPv4", nil, counters)
+ c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues]("TCPv4", nil)
if err != nil {
return fmt.Errorf("failed to create TCPv4 collector: %w", err)
}
- c.perfDataCollector6, err = perfdata.NewCollector("TCPv6", nil, counters)
+ c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues]("TCPv6", nil)
if err != nil {
return fmt.Errorf("failed to create TCPv6 collector: %w", err)
}
@@ -224,84 +214,76 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollector4.Collect()
+ err := c.perfDataCollector4.Collect(&c.perfDataObject4)
if err != nil {
- return fmt.Errorf("failed to collect TCPv4 metrics: %w", err)
- }
-
- if _, ok := data[perfdata.InstanceEmpty]; !ok {
- return errors.New("no data for TCPv4")
+ return fmt.Errorf("failed to collect TCPv4 metrics[0]. %w", err)
}
- c.writeTCPCounters(ch, data[perfdata.InstanceEmpty], []string{"ipv4"})
+ c.writeTCPCounters(ch, c.perfDataObject4, []string{"ipv4"})
- data, err = c.perfDataCollector6.Collect()
+ err = c.perfDataCollector6.Collect(&c.perfDataObject6)
if err != nil {
- return fmt.Errorf("failed to collect TCPv6 metrics: %w", err)
- }
-
- if _, ok := data[perfdata.InstanceEmpty]; !ok {
- return errors.New("no data for TCPv6")
+ return fmt.Errorf("failed to collect TCPv6 metrics[0]. %w", err)
}
- c.writeTCPCounters(ch, data[perfdata.InstanceEmpty], []string{"ipv6"})
+ c.writeTCPCounters(ch, c.perfDataObject6, []string{"ipv6"})
return nil
}
-func (c *Collector) writeTCPCounters(ch chan<- prometheus.Metric, metrics map[string]perfdata.CounterValue, labels []string) {
+func (c *Collector) writeTCPCounters(ch chan<- prometheus.Metric, metrics []perfDataCounterValues, labels []string) {
ch <- prometheus.MustNewConstMetric(
c.connectionFailures,
prometheus.CounterValue,
- metrics[connectionFailures].FirstValue,
+ metrics[0].ConnectionFailures,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.connectionsActive,
prometheus.CounterValue,
- metrics[connectionsActive].FirstValue,
+ metrics[0].ConnectionsActive,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.connectionsEstablished,
prometheus.GaugeValue,
- metrics[connectionsEstablished].FirstValue,
+ metrics[0].ConnectionsEstablished,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.connectionsPassive,
prometheus.CounterValue,
- metrics[connectionsPassive].FirstValue,
+ metrics[0].ConnectionsPassive,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.connectionsReset,
prometheus.CounterValue,
- metrics[connectionsReset].FirstValue,
+ metrics[0].ConnectionsReset,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.segmentsTotal,
prometheus.CounterValue,
- metrics[segmentsPerSec].FirstValue,
+ metrics[0].SegmentsPerSec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.segmentsReceivedTotal,
prometheus.CounterValue,
- metrics[segmentsReceivedPerSec].FirstValue,
+ metrics[0].SegmentsReceivedPerSec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.segmentsRetransmittedTotal,
prometheus.CounterValue,
- metrics[segmentsRetransmittedPerSec].FirstValue,
+ metrics[0].SegmentsRetransmittedPerSec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.segmentsSentTotal,
prometheus.CounterValue,
- metrics[segmentsSentPerSec].FirstValue,
+ metrics[0].SegmentsSentPerSec,
labels...,
)
}
diff --git a/internal/collector/tcp/const.go b/internal/collector/tcp/types.go
similarity index 57%
rename from internal/collector/tcp/const.go
rename to internal/collector/tcp/types.go
index 9156ddd2b..431f3b1c0 100644
--- a/internal/collector/tcp/const.go
+++ b/internal/collector/tcp/types.go
@@ -19,14 +19,14 @@ package tcp
// - https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx
// The TCPv6 performance object uses the same fields.
// https://learn.microsoft.com/en-us/dotnet/api/system.net.networkinformation.tcpstate?view=net-8.0.
-const (
- connectionFailures = "Connection Failures"
- connectionsActive = "Connections Active"
- connectionsEstablished = "Connections Established"
- connectionsPassive = "Connections Passive"
- connectionsReset = "Connections Reset"
- segmentsPerSec = "Segments/sec"
- segmentsReceivedPerSec = "Segments Received/sec"
- segmentsRetransmittedPerSec = "Segments Retransmitted/sec"
- segmentsSentPerSec = "Segments Sent/sec"
-)
+type perfDataCounterValues struct {
+ ConnectionFailures float64 `perfdata:"Connection Failures"`
+ ConnectionsActive float64 `perfdata:"Connections Active"`
+ ConnectionsEstablished float64 `perfdata:"Connections Established"`
+ ConnectionsPassive float64 `perfdata:"Connections Passive"`
+ ConnectionsReset float64 `perfdata:"Connections Reset"`
+ SegmentsPerSec float64 `perfdata:"Segments/sec"`
+ SegmentsReceivedPerSec float64 `perfdata:"Segments Received/sec"`
+ SegmentsRetransmittedPerSec float64 `perfdata:"Segments Retransmitted/sec"`
+ SegmentsSentPerSec float64 `perfdata:"Segments Sent/sec"`
+}
diff --git a/internal/collector/terminal_services/const.go b/internal/collector/terminal_services/const.go
deleted file mode 100644
index ee52d8c45..000000000
--- a/internal/collector/terminal_services/const.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package terminal_services
-
-const (
- handleCount = "Handle Count"
- pageFaultsPersec = "Page Faults/sec"
- pageFileBytes = "Page File Bytes"
- pageFileBytesPeak = "Page File Bytes Peak"
- percentPrivilegedTime = "% Privileged Time"
- percentProcessorTime = "% Processor Time"
- percentUserTime = "% User Time"
- poolNonpagedBytes = "Pool Nonpaged Bytes"
- poolPagedBytes = "Pool Paged Bytes"
- privateBytes = "Private Bytes"
- threadCount = "Thread Count"
- virtualBytes = "Virtual Bytes"
- virtualBytesPeak = "Virtual Bytes Peak"
- workingSet = "Working Set"
- workingSetPeak = "Working Set Peak"
-
- successfulConnections = "Successful Connections"
- pendingConnections = "Pending Connections"
- failedConnections = "Failed Connections"
-)
diff --git a/internal/collector/terminal_services/terminal_services.go b/internal/collector/terminal_services/terminal_services.go
index 33726d877..f9f6af282 100644
--- a/internal/collector/terminal_services/terminal_services.go
+++ b/internal/collector/terminal_services/terminal_services.go
@@ -25,7 +25,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/wtsapi32"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@@ -72,8 +72,11 @@ type Collector struct {
connectionBrokerEnabled bool
- perfDataCollectorTerminalServicesSession *perfdata.Collector
- perfDataCollectorBroker *perfdata.Collector
+ perfDataCollectorTerminalServicesSession *pdh.Collector
+ perfDataCollectorBroker *pdh.Collector
+
+ perfDataObjectTerminalServicesSession []perfDataCounterValuesTerminalServicesSession
+ perfDataObjectBroker []perfDataCounterValuesBroker
hServer windows.Handle
@@ -136,27 +139,9 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
- counters := []string{
- handleCount,
- pageFaultsPersec,
- pageFileBytes,
- pageFileBytesPeak,
- percentPrivilegedTime,
- percentProcessorTime,
- percentUserTime,
- poolNonpagedBytes,
- poolPagedBytes,
- privateBytes,
- threadCount,
- virtualBytes,
- virtualBytesPeak,
- workingSet,
- workingSetPeak,
- }
-
var err error
- c.perfDataCollectorTerminalServicesSession, err = perfdata.NewCollector("Terminal Services Session", perfdata.InstancesAll, counters)
+ c.perfDataCollectorTerminalServicesSession, err = pdh.NewCollector[perfDataCounterValuesTerminalServicesSession]("Terminal Services Session", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Terminal Services Session collector: %w", err)
}
@@ -164,15 +149,9 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
c.connectionBrokerEnabled = isConnectionBrokerServer(miSession)
if c.connectionBrokerEnabled {
- counters = []string{
- successfulConnections,
- pendingConnections,
- failedConnections,
- }
-
var err error
- c.perfDataCollectorBroker, err = perfdata.NewCollector("Remote Desktop Connection Broker Counterset", perfdata.InstancesAll, counters)
+ c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker]("Remote Desktop Connection Broker Counterset", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err)
}
@@ -303,16 +282,16 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectTSSessionCounters(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorTerminalServicesSession.Collect()
+ err := c.perfDataCollectorTerminalServicesSession.Collect(&c.perfDataObjectTerminalServicesSession)
if err != nil {
return fmt.Errorf("failed to collect Terminal Services Session metrics: %w", err)
}
names := make(map[string]bool)
- for name, data := range perfData {
+ for _, data := range c.perfDataObjectTerminalServicesSession {
// only connect metrics for remote named sessions
- n := strings.ToLower(name)
+ n := strings.ToLower(data.Name)
if n == "" || n == "services" || n == "console" {
continue
}
@@ -326,95 +305,95 @@ func (c *Collector) collectTSSessionCounters(ch chan<- prometheus.Metric) error
ch <- prometheus.MustNewConstMetric(
c.handleCount,
prometheus.GaugeValue,
- data[handleCount].FirstValue,
- name,
+ data.HandleCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.pageFaultsPerSec,
prometheus.CounterValue,
- data[pageFaultsPersec].FirstValue,
- name,
+ data.PageFaultsPersec,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.pageFileBytes,
prometheus.GaugeValue,
- data[pageFileBytes].FirstValue,
- name,
+ data.PageFileBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.pageFileBytesPeak,
prometheus.GaugeValue,
- data[pageFileBytesPeak].FirstValue,
- name,
+ data.PageFileBytesPeak,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.percentCPUTime,
prometheus.CounterValue,
- data[percentPrivilegedTime].FirstValue,
- name,
+ data.PercentPrivilegedTime,
+ data.Name,
"privileged",
)
ch <- prometheus.MustNewConstMetric(
c.percentCPUTime,
prometheus.CounterValue,
- data[percentProcessorTime].FirstValue,
- name,
+ data.PercentProcessorTime,
+ data.Name,
"processor",
)
ch <- prometheus.MustNewConstMetric(
c.percentCPUTime,
prometheus.CounterValue,
- data[percentUserTime].FirstValue,
- name,
+ data.PercentUserTime,
+ data.Name,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.poolNonPagedBytes,
prometheus.GaugeValue,
- data[poolNonpagedBytes].FirstValue,
- name,
+ data.PoolNonpagedBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedBytes,
prometheus.GaugeValue,
- data[poolPagedBytes].FirstValue,
- name,
+ data.PoolPagedBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.privateBytes,
prometheus.GaugeValue,
- data[privateBytes].FirstValue,
- name,
+ data.PrivateBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.threadCount,
prometheus.GaugeValue,
- data[threadCount].FirstValue,
- name,
+ data.ThreadCount,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualBytes,
prometheus.GaugeValue,
- data[virtualBytes].FirstValue,
- name,
+ data.VirtualBytes,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualBytesPeak,
prometheus.GaugeValue,
- data[virtualBytesPeak].FirstValue,
- name,
+ data.VirtualBytesPeak,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.workingSet,
prometheus.GaugeValue,
- data[workingSet].FirstValue,
- name,
+ data.WorkingSet,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.workingSetPeak,
prometheus.GaugeValue,
- data[workingSetPeak].FirstValue,
- name,
+ data.WorkingSetPeak,
+ data.Name,
)
}
@@ -422,34 +401,29 @@ func (c *Collector) collectTSSessionCounters(ch chan<- prometheus.Metric) error
}
func (c *Collector) collectCollectionBrokerPerformanceCounter(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorBroker.Collect()
+ err := c.perfDataCollectorBroker.Collect(&c.perfDataObjectBroker)
if err != nil {
return fmt.Errorf("failed to collect Remote Desktop Connection Broker Counterset metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return errors.New("query for Remote Desktop Connection Broker Counterset returned empty result set")
- }
-
ch <- prometheus.MustNewConstMetric(
c.connectionBrokerPerformance,
prometheus.CounterValue,
- data[successfulConnections].FirstValue,
+ c.perfDataObjectBroker[0].SuccessfulConnections,
"Successful",
)
ch <- prometheus.MustNewConstMetric(
c.connectionBrokerPerformance,
prometheus.CounterValue,
- data[pendingConnections].FirstValue,
+ c.perfDataObjectBroker[0].PendingConnections,
"Pending",
)
ch <- prometheus.MustNewConstMetric(
c.connectionBrokerPerformance,
prometheus.CounterValue,
- data[failedConnections].FirstValue,
+ c.perfDataObjectBroker[0].FailedConnections,
"Failed",
)
diff --git a/internal/collector/terminal_services/types.go b/internal/collector/terminal_services/types.go
new file mode 100644
index 000000000..4a2521f7e
--- /dev/null
+++ b/internal/collector/terminal_services/types.go
@@ -0,0 +1,42 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package terminal_services
+
+type perfDataCounterValuesTerminalServicesSession struct {
+ Name string
+
+ HandleCount float64 `perfdata:"Handle Count"`
+ PageFaultsPersec float64 `perfdata:"Page Faults/sec"`
+ PageFileBytes float64 `perfdata:"Page File Bytes"`
+ PageFileBytesPeak float64 `perfdata:"Page File Bytes Peak"`
+ PercentPrivilegedTime float64 `perfdata:"% Privileged Time"`
+ PercentProcessorTime float64 `perfdata:"% Processor Time"`
+ PercentUserTime float64 `perfdata:"% User Time"`
+ PoolNonpagedBytes float64 `perfdata:"Pool Nonpaged Bytes"`
+ PoolPagedBytes float64 `perfdata:"Pool Paged Bytes"`
+ PrivateBytes float64 `perfdata:"Private Bytes"`
+ ThreadCount float64 `perfdata:"Thread Count"`
+ VirtualBytes float64 `perfdata:"Virtual Bytes"`
+ VirtualBytesPeak float64 `perfdata:"Virtual Bytes Peak"`
+ WorkingSet float64 `perfdata:"Working Set"`
+ WorkingSetPeak float64 `perfdata:"Working Set Peak"`
+}
+
+type perfDataCounterValuesBroker struct {
+ SuccessfulConnections float64 `perfdata:"Successful Connections"`
+ PendingConnections float64 `perfdata:"Pending Connections"`
+ FailedConnections float64 `perfdata:"Failed Connections"`
+}
diff --git a/internal/collector/thermalzone/thermalzone.go b/internal/collector/thermalzone/thermalzone.go
index 52be5fd89..bea13e14f 100644
--- a/internal/collector/thermalzone/thermalzone.go
+++ b/internal/collector/thermalzone/thermalzone.go
@@ -21,7 +21,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -37,7 +37,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
percentPassiveLimit *prometheus.Desc
temperature *prometheus.Desc
@@ -71,11 +72,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("Thermal Zone Information", perfdata.InstancesAll, []string{
- highPrecisionTemperature,
- percentPassiveLimit,
- throttleReasons,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("Thermal Zone Information", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err)
}
@@ -111,32 +108,32 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Thermal Zone Information metrics: %w", err)
}
- for sensorName, data := range perfData {
+ for _, data := range c.perfDataObject {
// Divide by 10 and subtract 273.15 to convert decikelvin to celsius
ch <- prometheus.MustNewConstMetric(
c.temperature,
prometheus.GaugeValue,
- (data[highPrecisionTemperature].FirstValue/10.0)-273.15,
- sensorName,
+ (data.HighPrecisionTemperature/10.0)-273.15,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.percentPassiveLimit,
prometheus.GaugeValue,
- data[percentPassiveLimit].FirstValue,
- sensorName,
+ data.PercentPassiveLimit,
+ data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.throttleReasons,
prometheus.GaugeValue,
- data[throttleReasons].FirstValue,
- sensorName,
+ data.ThrottleReasons,
+ data.Name,
)
}
diff --git a/internal/collector/thermalzone/const.go b/internal/collector/thermalzone/types.go
similarity index 70%
rename from internal/collector/thermalzone/const.go
rename to internal/collector/thermalzone/types.go
index 65efeda72..a015a0a43 100644
--- a/internal/collector/thermalzone/const.go
+++ b/internal/collector/thermalzone/types.go
@@ -13,8 +13,10 @@
package thermalzone
-const (
- highPrecisionTemperature = "High Precision Temperature"
- percentPassiveLimit = "% Passive Limit"
- throttleReasons = "Throttle Reasons"
-)
+type perfDataCounterValues struct {
+ Name string
+
+ HighPrecisionTemperature float64 `perfdata:"High Precision Temperature"`
+ PercentPassiveLimit float64 `perfdata:"% Passive Limit"`
+ ThrottleReasons float64 `perfdata:"Throttle Reasons"`
+}
diff --git a/internal/collector/time/time.go b/internal/collector/time/time.go
index c9b4e4804..5a98a31a0 100644
--- a/internal/collector/time/time.go
+++ b/internal/collector/time/time.go
@@ -26,7 +26,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
@@ -55,7 +55,8 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
- perfDataCollector *perfdata.Collector
+ perfDataCollector *pdh.Collector
+ perfDataObject []perfDataCounterValues
currentTime *prometheus.Desc
timezone *prometheus.Desc
@@ -126,14 +127,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollector, err = perfdata.NewCollector("Windows Time Service", nil, []string{
- ClockFrequencyAdjustmentPPBTotal,
- ComputedTimeOffset,
- NTPClientTimeSourceCount,
- NTPRoundTripDelay,
- NTPServerIncomingRequestsTotal,
- NTPServerOutgoingResponsesTotal,
- })
+ c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues]("Windows Time Service", nil)
if err != nil {
return fmt.Errorf("failed to create Windows Time Service collector: %w", err)
}
@@ -236,45 +230,40 @@ func (c *Collector) collectTime(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectNTP(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollector.Collect()
+ err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect VM Memory metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("failed to collect VM Memory metrics: %w", err)
- }
-
ch <- prometheus.MustNewConstMetric(
c.clockFrequencyAdjustmentPPBTotal,
prometheus.CounterValue,
- data[ClockFrequencyAdjustmentPPBTotal].FirstValue,
+ c.perfDataObject[0].ClockFrequencyAdjustmentPPBTotal,
)
ch <- prometheus.MustNewConstMetric(
c.computedTimeOffset,
prometheus.GaugeValue,
- data[ComputedTimeOffset].FirstValue/1000000, // microseconds -> seconds
+ c.perfDataObject[0].ComputedTimeOffset/1000000, // microseconds -> seconds
)
ch <- prometheus.MustNewConstMetric(
c.ntpClientTimeSourceCount,
prometheus.GaugeValue,
- data[NTPClientTimeSourceCount].FirstValue,
+ c.perfDataObject[0].NTPClientTimeSourceCount,
)
ch <- prometheus.MustNewConstMetric(
c.ntpRoundTripDelay,
prometheus.GaugeValue,
- data[NTPRoundTripDelay].FirstValue/1000000, // microseconds -> seconds
+ c.perfDataObject[0].NTPRoundTripDelay/1000000, // microseconds -> seconds
)
ch <- prometheus.MustNewConstMetric(
c.ntpServerIncomingRequestsTotal,
prometheus.CounterValue,
- data[NTPServerIncomingRequestsTotal].FirstValue,
+ c.perfDataObject[0].NTPServerIncomingRequestsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ntpServerOutgoingResponsesTotal,
prometheus.CounterValue,
- data[NTPServerOutgoingResponsesTotal].FirstValue,
+ c.perfDataObject[0].NTPServerOutgoingResponsesTotal,
)
return nil
diff --git a/internal/collector/time/const.go b/internal/collector/time/types.go
similarity index 54%
rename from internal/collector/time/const.go
rename to internal/collector/time/types.go
index 16ca6a398..3bed16b70 100644
--- a/internal/collector/time/const.go
+++ b/internal/collector/time/types.go
@@ -15,11 +15,11 @@
package time
-const (
- ClockFrequencyAdjustmentPPBTotal = "Clock Frequency Adjustment (ppb)"
- ComputedTimeOffset = "Computed Time Offset"
- NTPClientTimeSourceCount = "NTP Client Time Source Count"
- NTPRoundTripDelay = "NTP Roundtrip Delay"
- NTPServerIncomingRequestsTotal = "NTP Server Incoming Requests"
- NTPServerOutgoingResponsesTotal = "NTP Server Outgoing Responses"
-)
+type perfDataCounterValues struct {
+ ClockFrequencyAdjustmentPPBTotal float64 `perfdata:"Clock Frequency Adjustment (ppb)"`
+ ComputedTimeOffset float64 `perfdata:"Computed Time Offset"`
+ NTPClientTimeSourceCount float64 `perfdata:"NTP Client Time Source Count"`
+ NTPRoundTripDelay float64 `perfdata:"NTP Roundtrip Delay"`
+ NTPServerIncomingRequestsTotal float64 `perfdata:"NTP Server Incoming Requests"`
+ NTPServerOutgoingResponsesTotal float64 `perfdata:"NTP Server Outgoing Responses"`
+}
diff --git a/internal/collector/udp/const.go b/internal/collector/udp/types.go
similarity index 80%
rename from internal/collector/udp/const.go
rename to internal/collector/udp/types.go
index 15facf8f7..9d4e8ca29 100644
--- a/internal/collector/udp/const.go
+++ b/internal/collector/udp/types.go
@@ -17,12 +17,12 @@ package udp
// The TCPv6 performance object uses the same fields.
// https://learn.microsoft.com/en-us/dotnet/api/system.net.networkinformation.tcpstate?view=net-8.0.
-const (
- datagramsNoPortPerSec = "Datagrams No Port/sec"
- datagramsReceivedPerSec = "Datagrams Received/sec"
- datagramsReceivedErrors = "Datagrams Received Errors"
- datagramsSentPerSec = "Datagrams Sent/sec"
-)
+type perfDataCounterValues struct {
+ DatagramsNoPortPerSec float64 `perfdata:"Datagrams No Port/sec"`
+ DatagramsReceivedPerSec float64 `perfdata:"Datagrams Received/sec"`
+ DatagramsReceivedErrors float64 `perfdata:"Datagrams Received Errors"`
+ DatagramsSentPerSec float64 `perfdata:"Datagrams Sent/sec"`
+}
// Datagrams No Port/sec is the rate of received UDP datagrams for which there was no application at the destination port.
// Datagrams Received Errors is the number of received UDP datagrams that could not be delivered for reasons other than the lack of an application at the destination port.
diff --git a/internal/collector/udp/udp.go b/internal/collector/udp/udp.go
index 74c9cc0d5..6f8ef7664 100644
--- a/internal/collector/udp/udp.go
+++ b/internal/collector/udp/udp.go
@@ -16,13 +16,12 @@
package udp
import (
- "errors"
"fmt"
"log/slog"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -38,8 +37,10 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
- perfDataCollector4 *perfdata.Collector
- perfDataCollector6 *perfdata.Collector
+ perfDataCollector4 *pdh.Collector
+ perfDataCollector6 *pdh.Collector
+ perfDataObject4 []perfDataCounterValues
+ perfDataObject6 []perfDataCounterValues
datagramsNoPortTotal *prometheus.Desc
datagramsReceivedTotal *prometheus.Desc
@@ -79,21 +80,14 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
- counters := []string{
- datagramsNoPortPerSec,
- datagramsReceivedPerSec,
- datagramsReceivedErrors,
- datagramsSentPerSec,
- }
-
var err error
- c.perfDataCollector4, err = perfdata.NewCollector("UDPv4", nil, counters)
+ c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues]("UDPv4", nil)
if err != nil {
return fmt.Errorf("failed to create UDPv4 collector: %w", err)
}
- c.perfDataCollector6, err = perfdata.NewCollector("UDPv6", nil, counters)
+ c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues]("UDPv6", nil)
if err != nil {
return fmt.Errorf("failed to create UDPv6 collector: %w", err)
}
@@ -133,54 +127,46 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
- data, err := c.perfDataCollector4.Collect()
+ err := c.perfDataCollector4.Collect(&c.perfDataObject4)
if err != nil {
return fmt.Errorf("failed to collect UDPv4 metrics: %w", err)
}
- if _, ok := data[perfdata.InstanceEmpty]; !ok {
- return errors.New("no data for UDPv4")
- }
-
- c.writeUDPCounters(ch, data[perfdata.InstanceEmpty], []string{"ipv4"})
+ c.writeUDPCounters(ch, c.perfDataObject4, []string{"ipv4"})
- data, err = c.perfDataCollector6.Collect()
+ err = c.perfDataCollector6.Collect(&c.perfDataObject6)
if err != nil {
return fmt.Errorf("failed to collect UDPv6 metrics: %w", err)
}
- if _, ok := data[perfdata.InstanceEmpty]; !ok {
- return errors.New("no data for UDPv6")
- }
-
- c.writeUDPCounters(ch, data[perfdata.InstanceEmpty], []string{"ipv6"})
+ c.writeUDPCounters(ch, c.perfDataObject6, []string{"ipv6"})
return nil
}
-func (c *Collector) writeUDPCounters(ch chan<- prometheus.Metric, metrics map[string]perfdata.CounterValue, labels []string) {
+func (c *Collector) writeUDPCounters(ch chan<- prometheus.Metric, metrics []perfDataCounterValues, labels []string) {
ch <- prometheus.MustNewConstMetric(
c.datagramsNoPortTotal,
prometheus.CounterValue,
- metrics[datagramsNoPortPerSec].FirstValue,
+ metrics[0].DatagramsNoPortPerSec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.datagramsReceivedErrorsTotal,
prometheus.CounterValue,
- metrics[datagramsReceivedErrors].FirstValue,
+ metrics[0].DatagramsReceivedErrors,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.datagramsReceivedTotal,
prometheus.GaugeValue,
- metrics[datagramsReceivedPerSec].FirstValue,
+ metrics[0].DatagramsReceivedPerSec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.datagramsSentTotal,
prometheus.CounterValue,
- metrics[datagramsSentPerSec].FirstValue,
+ metrics[0].DatagramsSentPerSec,
labels...,
)
}
diff --git a/internal/collector/vmware/const.go b/internal/collector/vmware/const.go
deleted file mode 100644
index 160989704..000000000
--- a/internal/collector/vmware/const.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package vmware
-
-const (
- couEffectiveVMSpeedMHz = "Effective VM Speed in MHz" // \VM Processor(*)\Effective VM Speed in MHz
- cpuHostProcessorSpeedMHz = "Host processor speed in MHz" // \VM Processor(*)\Host processor speed in MHz
- cpuLimitMHz = "Limit in MHz" // \VM Processor(*)\Limit in MHz
- cpuReservationMHz = "Reservation in MHz" // \VM Processor(*)\Reservation in MHz
- cpuShares = "Shares" // \VM Processor(*)\Shares
- cpuStolenMs = "CPU stolen time" // \VM Processor(*)\CPU stolen time
- cpuTimePercents = "% Processor Time" // \VM Processor(*)\% Processor Time
-
- memActiveMB = "Memory Active in MB" // \VM Memory\Memory Active in MB
- memBalloonedMB = "Memory Ballooned in MB" // \VM Memory\Memory Ballooned in MB
- memLimitMB = "Memory Limit in MB" // \VM Memory\Memory Limit in MB
- memMappedMB = "Memory Mapped in MB" // \VM Memory\Memory Mapped in MB
- memOverheadMB = "Memory Overhead in MB" // \VM Memory\Memory Overhead in MB
- memReservationMB = "Memory Reservation in MB" // \VM Memory\Memory Reservation in MB
- memSharedMB = "Memory Shared in MB" // \VM Memory\Memory Shared in MB
- memSharedSavedMB = "Memory Shared Saved in MB" // \VM Memory\Memory Shared Saved in MB
- memShares = "Memory Shares" // \VM Memory\Memory Shares
- memSwappedMB = "Memory Swapped in MB" // \VM Memory\Memory Swapped in MB
- memTargetSizeMB = "Memory Target Size" // \VM Memory\Memory Target Size
- memUsedMB = "Memory Used in MB" // \VM Memory\Memory Used in MB
-)
diff --git a/internal/collector/vmware/types.go b/internal/collector/vmware/types.go
new file mode 100644
index 000000000..e9a19e15f
--- /dev/null
+++ b/internal/collector/vmware/types.go
@@ -0,0 +1,41 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package vmware
+
+type perfDataCounterValuesCPU struct {
+ CouEffectiveVMSpeedMHz float64 `perfdata:"Effective VM Speed in MHz"` // \VM Processor(*)\Effective VM Speed in MHz
+ CpuHostProcessorSpeedMHz float64 `perfdata:"Host processor speed in MHz"` // \VM Processor(*)\Host processor speed in MHz
+ CpuLimitMHz float64 `perfdata:"Limit in MHz"` // \VM Processor(*)\Limit in MHz
+ CpuReservationMHz float64 `perfdata:"Reservation in MHz"` // \VM Processor(*)\Reservation in MHz
+ CpuShares float64 `perfdata:"Shares"` // \VM Processor(*)\Shares
+ CpuStolenMs float64 `perfdata:"CPU stolen time"` // \VM Processor(*)\CPU stolen time
+ CpuTimePercents float64 `perfdata:"% Processor Time"` // \VM Processor(*)\% Processor Time
+}
+
+type perfDataCounterValuesMemory struct {
+ MemActiveMB float64 `perfdata:"Memory Active in MB"` // \VM Memory\Memory Active in MB
+ MemBalloonedMB float64 `perfdata:"Memory Ballooned in MB"` // \VM Memory\Memory Ballooned in MB
+ MemLimitMB float64 `perfdata:"Memory Limit in MB"` // \VM Memory\Memory Limit in MB
+ MemMappedMB float64 `perfdata:"Memory Mapped in MB"` // \VM Memory\Memory Mapped in MB
+ MemOverheadMB float64 `perfdata:"Memory Overhead in MB"` // \VM Memory\Memory Overhead in MB
+ MemReservationMB float64 `perfdata:"Memory Reservation in MB"` // \VM Memory\Memory Reservation in MB
+ MemSharedMB float64 `perfdata:"Memory Shared in MB"` // \VM Memory\Memory Shared in MB
+ MemSharedSavedMB float64 `perfdata:"Memory Shared Saved in MB"` // \VM Memory\Memory Shared Saved in MB
+ MemShares float64 `perfdata:"Memory Shares"` // \VM Memory\Memory Shares
+ MemSwappedMB float64 `perfdata:"Memory Swapped in MB"` // \VM Memory\Memory Swapped in MB
+ MemTargetSizeMB float64 `perfdata:"Memory Target Size"` // \VM Memory\Memory Target Size
+ MemUsedMB float64 `perfdata:"Memory Used in MB"` // \VM Memory\Memory Used in MB
+}
diff --git a/internal/collector/vmware/vmware.go b/internal/collector/vmware/vmware.go
index 2aaf5687a..29e7ea13f 100644
--- a/internal/collector/vmware/vmware.go
+++ b/internal/collector/vmware/vmware.go
@@ -22,7 +22,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@@ -38,8 +38,10 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics.
type Collector struct {
config Config
- perfDataCollectorCPU *perfdata.Collector
- perfDataCollectorMemory *perfdata.Collector
+ perfDataCollectorCPU *pdh.Collector
+ perfDataCollectorMemory *pdh.Collector
+ perfDataObjectCPU []perfDataCounterValuesCPU
+ perfDataObjectMemory []perfDataCounterValuesMemory
memActive *prometheus.Desc
memBallooned *prometheus.Desc
@@ -93,15 +95,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
- c.perfDataCollectorCPU, err = perfdata.NewCollector("VM Processor", perfdata.InstancesTotal, []string{
- cpuLimitMHz,
- cpuReservationMHz,
- cpuShares,
- cpuStolenMs,
- cpuTimePercents,
- couEffectiveVMSpeedMHz,
- cpuHostProcessorSpeedMHz,
- })
+ c.perfDataCollectorCPU, err = pdh.NewCollector[perfDataCounterValuesCPU]("VM Processor", pdh.InstancesTotal)
if err != nil {
return fmt.Errorf("failed to create VM Processor collector: %w", err)
}
@@ -149,20 +143,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
- c.perfDataCollectorMemory, err = perfdata.NewCollector("VM Memory", nil, []string{
- memActiveMB,
- memBalloonedMB,
- memLimitMB,
- memMappedMB,
- memOverheadMB,
- memReservationMB,
- memSharedMB,
- memSharedSavedMB,
- memShares,
- memSwappedMB,
- memTargetSizeMB,
- memUsedMB,
- })
+ c.perfDataCollectorMemory, err = pdh.NewCollector[perfDataCounterValuesMemory]("VM Memory", nil)
if err != nil {
return fmt.Errorf("failed to create VM Memory collector: %w", err)
}
@@ -260,142 +241,132 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectMem(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorMemory.Collect()
+ err := c.perfDataCollectorMemory.Collect(&c.perfDataObjectMemory)
if err != nil {
return fmt.Errorf("failed to collect VM Memory metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceEmpty]
- if !ok {
- return fmt.Errorf("failed to collect VM Memory metrics: %w", types.ErrNoData)
- }
-
ch <- prometheus.MustNewConstMetric(
c.memActive,
prometheus.GaugeValue,
- utils.MBToBytes(data[memActiveMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemActiveMB),
)
ch <- prometheus.MustNewConstMetric(
c.memBallooned,
prometheus.GaugeValue,
- utils.MBToBytes(data[memBalloonedMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemBalloonedMB),
)
ch <- prometheus.MustNewConstMetric(
c.memLimit,
prometheus.GaugeValue,
- utils.MBToBytes(data[memLimitMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemLimitMB),
)
ch <- prometheus.MustNewConstMetric(
c.memMapped,
prometheus.GaugeValue,
- utils.MBToBytes(data[memMappedMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemMappedMB),
)
ch <- prometheus.MustNewConstMetric(
c.memOverhead,
prometheus.GaugeValue,
- utils.MBToBytes(data[memOverheadMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemOverheadMB),
)
ch <- prometheus.MustNewConstMetric(
c.memReservation,
prometheus.GaugeValue,
- utils.MBToBytes(data[memReservationMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemReservationMB),
)
ch <- prometheus.MustNewConstMetric(
c.memShared,
prometheus.GaugeValue,
- utils.MBToBytes(data[memSharedMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemSharedMB),
)
ch <- prometheus.MustNewConstMetric(
c.memSharedSaved,
prometheus.GaugeValue,
- utils.MBToBytes(data[memSharedSavedMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemSharedSavedMB),
)
ch <- prometheus.MustNewConstMetric(
c.memShares,
prometheus.GaugeValue,
- data[memShares].FirstValue,
+ c.perfDataObjectMemory[0].MemShares,
)
ch <- prometheus.MustNewConstMetric(
c.memSwapped,
prometheus.GaugeValue,
- utils.MBToBytes(data[memSwappedMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemSwappedMB),
)
ch <- prometheus.MustNewConstMetric(
c.memTargetSize,
prometheus.GaugeValue,
- utils.MBToBytes(data[memTargetSizeMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemTargetSizeMB),
)
ch <- prometheus.MustNewConstMetric(
c.memUsed,
prometheus.GaugeValue,
- utils.MBToBytes(data[memUsedMB].FirstValue),
+ utils.MBToBytes(c.perfDataObjectMemory[0].MemUsedMB),
)
return nil
}
func (c *Collector) collectCpu(ch chan<- prometheus.Metric) error {
- perfData, err := c.perfDataCollectorCPU.Collect()
+ err := c.perfDataCollectorCPU.Collect(&c.perfDataObjectCPU)
if err != nil {
return fmt.Errorf("failed to collect VM CPU metrics: %w", err)
}
- data, ok := perfData[perfdata.InstanceTotal]
- if !ok {
- return fmt.Errorf("failed to collect VM CPU metrics: %w", types.ErrNoData)
- }
-
ch <- prometheus.MustNewConstMetric(
c.cpuLimitMHz,
prometheus.GaugeValue,
- data[cpuLimitMHz].FirstValue,
+ c.perfDataObjectCPU[0].CpuLimitMHz,
)
ch <- prometheus.MustNewConstMetric(
c.cpuReservationMHz,
prometheus.GaugeValue,
- data[cpuReservationMHz].FirstValue,
+ c.perfDataObjectCPU[0].CpuReservationMHz,
)
ch <- prometheus.MustNewConstMetric(
c.cpuShares,
prometheus.GaugeValue,
- data[cpuShares].FirstValue,
+ c.perfDataObjectCPU[0].CpuShares,
)
ch <- prometheus.MustNewConstMetric(
c.cpuStolenTotal,
prometheus.CounterValue,
- utils.MilliSecToSec(data[cpuStolenMs].FirstValue),
+ utils.MilliSecToSec(c.perfDataObjectCPU[0].CpuStolenMs),
)
ch <- prometheus.MustNewConstMetric(
c.cpuTimeTotal,
prometheus.CounterValue,
- utils.MilliSecToSec(data[cpuTimePercents].FirstValue),
+ utils.MilliSecToSec(c.perfDataObjectCPU[0].CpuTimePercents),
)
ch <- prometheus.MustNewConstMetric(
c.cpuEffectiveVMSpeedMHz,
prometheus.GaugeValue,
- data[couEffectiveVMSpeedMHz].FirstValue,
+ c.perfDataObjectCPU[0].CouEffectiveVMSpeedMHz,
)
ch <- prometheus.MustNewConstMetric(
c.hostProcessorSpeedMHz,
prometheus.GaugeValue,
- data[cpuHostProcessorSpeedMHz].FirstValue,
+ c.perfDataObjectCPU[0].CpuHostProcessorSpeedMHz,
)
return nil
diff --git a/internal/config/config.go b/internal/config/config.go
index c9e87c227..5a2b0d278 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -39,19 +39,19 @@ type Resolver struct {
}
// NewResolver returns a Resolver structure.
-func NewResolver(file string, logger *slog.Logger, insecureSkipVerify bool) (*Resolver, error) {
+func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecureSkipVerify bool) (*Resolver, error) {
flags := map[string]string{}
var fileBytes []byte
var err error
if strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") {
- fileBytes, err = readFromURL(file, logger, insecureSkipVerify)
+ fileBytes, err = readFromURL(ctx, file, logger, insecureSkipVerify)
if err != nil {
return nil, err
}
} else {
- fileBytes, err = readFromFile(file, logger)
+ fileBytes, err = readFromFile(ctx, file, logger)
if err != nil {
return nil, err
}
@@ -75,8 +75,8 @@ func NewResolver(file string, logger *slog.Logger, insecureSkipVerify bool) (*Re
return &Resolver{flags: flags}, nil
}
-func readFromFile(file string, logger *slog.Logger) ([]byte, error) {
- logger.Info("Loading configuration file: " + file)
+func readFromFile(ctx context.Context, file string, logger *slog.Logger) ([]byte, error) {
+ logger.InfoContext(ctx, "loading configuration file: "+file)
if _, err := os.Stat(file); err != nil {
return nil, fmt.Errorf("failed to read configuration file: %w", err)
@@ -90,20 +90,20 @@ func readFromFile(file string, logger *slog.Logger) ([]byte, error) {
return fileBytes, nil
}
-func readFromURL(file string, logger *slog.Logger, insecureSkipVerify bool) ([]byte, error) {
- logger.Info("Loading configuration file from URL: " + file)
+func readFromURL(ctx context.Context, file string, logger *slog.Logger, insecureSkipVerify bool) ([]byte, error) {
+ logger.InfoContext(ctx, "loading configuration file from URL: "+file)
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}, //nolint:gosec
}
if insecureSkipVerify {
- logger.Warn("Loading configuration file with TLS verification disabled")
+ logger.WarnContext(ctx, "Loading configuration file with TLS verification disabled")
}
client := &http.Client{Transport: tr}
- req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, file, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, file, nil)
if err != nil {
return nil, fmt.Errorf("failed to create HTTP request: %w", err)
}
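
Editor's note: threading the caller's context through NewResolver, readFromFile, and readFromURL lets a remote config fetch be cancelled or bounded by the caller. A hedged usage sketch (the function name, URL, and timeout are illustrative only, not part of the change):

    // Sketch only: bound a remote config fetch with a timeout.
    func loadRemoteConfig(logger *slog.Logger) (*config.Resolver, error) {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        return config.NewResolver(ctx, "https://example.com/windows_exporter.yaml", logger, false)
    }
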
diff --git a/internal/headers/schedule_service/schedule_service.go b/internal/headers/schedule_service/schedule_service.go
index 1b333dc25..a121649a3 100644
--- a/internal/headers/schedule_service/schedule_service.go
+++ b/internal/headers/schedule_service/schedule_service.go
@@ -57,13 +57,13 @@ func (s *ScheduleService) Connect() error {
return err
}
- s.taskServiceObj = s.taskSchedulerObj.MustQueryInterface(ole.IID_IDispatch)
-
s.taskService, err = oleutil.CallMethod(s.taskServiceObj, "Connect")
if err != nil {
return fmt.Errorf("failed to connect to task service: %w", err)
}
+ s.taskServiceObj = s.taskSchedulerObj.MustQueryInterface(ole.IID_IDispatch)
+
return nil
}
diff --git a/internal/pdh/collector.go b/internal/pdh/collector.go
new file mode 100644
index 000000000..500f0e058
--- /dev/null
+++ b/internal/pdh/collector.go
@@ -0,0 +1,458 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package pdh
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "slices"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "github.com/prometheus-community/windows_exporter/internal/mi"
+ "github.com/prometheus/client_golang/prometheus"
+ "golang.org/x/sys/windows"
+)
+
+//nolint:gochecknoglobals
+var (
+ InstancesAll = []string{"*"}
+ InstancesTotal = []string{InstanceTotal}
+)
+
+type CounterValues = map[string]map[string]CounterValue
+
+type Collector struct {
+ object string
+ counters map[string]Counter
+ handle pdhQueryHandle
+ totalCounterRequested bool
+ mu sync.RWMutex
+
+ nameIndexValue int
+ metricsTypeIndexValue int
+
+ collectCh chan any
+ errorCh chan error
+}
+
+type Counter struct {
+ Name string
+ Desc string
+ MetricType prometheus.ValueType
+ Instances map[string]pdhCounterHandle
+ Type uint32
+ Frequency int64
+
+ FieldIndexValue int
+ FieldIndexSecondValue int
+}
+
+func NewCollector[T any](object string, instances []string) (*Collector, error) {
+ valueType := reflect.TypeFor[T]()
+
+ return NewCollectorWithReflection(object, instances, valueType)
+}
+
+func NewCollectorWithReflection(object string, instances []string, valueType reflect.Type) (*Collector, error) {
+ var handle pdhQueryHandle
+
+ if ret := OpenQuery(0, 0, &handle); ret != ErrorSuccess {
+ return nil, NewPdhError(ret)
+ }
+
+ if len(instances) == 0 {
+ instances = []string{InstanceEmpty}
+ }
+
+ collector := &Collector{
+ object: object,
+ counters: make(map[string]Counter, valueType.NumField()),
+ handle: handle,
+ totalCounterRequested: slices.Contains(instances, InstanceTotal),
+ mu: sync.RWMutex{},
+ nameIndexValue: -1,
+ metricsTypeIndexValue: -1,
+ }
+
+ errs := make([]error, 0, valueType.NumField())
+
+ if f, ok := valueType.FieldByName("Name"); ok {
+ if f.Type.Kind() == reflect.String {
+ collector.nameIndexValue = f.Index[0]
+ }
+ }
+
+ if f, ok := valueType.FieldByName("MetricType"); ok {
+ if f.Type.Kind() == reflect.TypeOf(prometheus.ValueType(0)).Kind() {
+ collector.metricsTypeIndexValue = f.Index[0]
+ }
+ }
+
+ for _, f := range reflect.VisibleFields(valueType) {
+ counterName, ok := f.Tag.Lookup("perfdata")
+ if !ok {
+ continue
+ }
+
+ if f.Type.Kind() != reflect.Float64 {
+ errs = append(errs, fmt.Errorf("field %s must be a float64", f.Name))
+
+ continue
+ }
+
+ var counter Counter
+ if counter, ok = collector.counters[counterName]; !ok {
+ counter = Counter{
+ Name: counterName,
+ Instances: make(map[string]pdhCounterHandle, len(instances)),
+ FieldIndexSecondValue: -1,
+ FieldIndexValue: -1,
+ }
+ }
+
+ if strings.HasSuffix(counterName, ",secondvalue") {
+ counterName = strings.TrimSuffix(counterName, ",secondvalue")
+
+ counter.FieldIndexSecondValue = f.Index[0]
+ } else {
+ counter.FieldIndexValue = f.Index[0]
+ }
+
+ if len(counter.Instances) != 0 {
+ collector.counters[counterName] = counter
+
+ continue
+ }
+
+ var counterPath string
+
+ for _, instance := range instances {
+ counterPath = formatCounterPath(object, instance, counterName)
+
+ var counterHandle pdhCounterHandle
+
+ if ret := AddEnglishCounter(handle, counterPath, 0, &counterHandle); ret != ErrorSuccess {
+ errs = append(errs, fmt.Errorf("failed to add counter %s: %w", counterPath, NewPdhError(ret)))
+
+ continue
+ }
+
+ counter.Instances[instance] = counterHandle
+
+ if counter.Type != 0 {
+ continue
+ }
+
+ // Get the info with the current buffer size
+ bufLen := uint32(0)
+
+ if ret := GetCounterInfo(counterHandle, 0, &bufLen, nil); ret != MoreData {
+ errs = append(errs, fmt.Errorf("GetCounterInfo: %w", NewPdhError(ret)))
+
+ continue
+ }
+
+ buf := make([]byte, bufLen)
+ if ret := GetCounterInfo(counterHandle, 0, &bufLen, &buf[0]); ret != ErrorSuccess {
+ errs = append(errs, fmt.Errorf("GetCounterInfo: %w", NewPdhError(ret)))
+
+ continue
+ }
+
+ ci := (*CounterInfo)(unsafe.Pointer(&buf[0]))
+ counter.Type = ci.DwType
+ counter.Desc = windows.UTF16PtrToString(ci.SzExplainText)
+
+ if val, ok := SupportedCounterTypes[counter.Type]; ok {
+ counter.MetricType = val
+ } else {
+ counter.MetricType = prometheus.GaugeValue
+ }
+
+ if counter.Type == PERF_ELAPSED_TIME {
+ if ret := GetCounterTimeBase(counterHandle, &counter.Frequency); ret != ErrorSuccess {
+ errs = append(errs, fmt.Errorf("GetCounterTimeBase: %w", NewPdhError(ret)))
+
+ continue
+ }
+ }
+ }
+
+ collector.counters[counterName] = counter
+ }
+
+ if err := errors.Join(errs...); err != nil {
+ return collector, fmt.Errorf("failed to initialize collector: %w", err)
+ }
+
+ if len(collector.counters) == 0 {
+ return nil, errors.New("no counters configured")
+ }
+
+ collector.collectCh = make(chan any)
+ collector.errorCh = make(chan error)
+
+ go collector.collectRoutine()
+
+ // Collect initial data because some counters need to be read twice to get the correct value.
+ collectValues := reflect.New(reflect.SliceOf(valueType)).Elem()
+ if err := collector.Collect(collectValues.Addr().Interface()); err != nil && !errors.Is(err, ErrNoData) {
+ return collector, fmt.Errorf("failed to collect initial data: %w", err)
+ }
+
+ return collector, nil
+}
+
+func (c *Collector) Describe() map[string]string {
+ if c == nil {
+ return map[string]string{}
+ }
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ desc := make(map[string]string, len(c.counters))
+
+ for _, counter := range c.counters {
+ desc[counter.Name] = counter.Desc
+ }
+
+ return desc
+}
+
+func (c *Collector) Collect(dst any) error {
+ if c == nil {
+ return ErrPerformanceCounterNotInitialized
+ }
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if len(c.counters) == 0 || c.handle == 0 || c.collectCh == nil || c.errorCh == nil {
+ return ErrPerformanceCounterNotInitialized
+ }
+
+ c.collectCh <- dst
+
+ return <-c.errorCh
+}
+
+func (c *Collector) collectRoutine() {
+ var (
+ err error
+ itemCount uint32
+ items []RawCounterItem
+ bytesNeeded uint32
+ )
+
+ buf := make([]byte, 1)
+
+ for data := range c.collectCh {
+ err = (func() error {
+ if ret := CollectQueryData(c.handle); ret != ErrorSuccess {
+ return fmt.Errorf("failed to collect query data: %w", NewPdhError(ret))
+ }
+
+ dv := reflect.ValueOf(data)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return fmt.Errorf("expected a pointer, got %s: %w", dv.Kind(), mi.ErrInvalidEntityType)
+ }
+
+ dv = dv.Elem()
+
+ if dv.Kind() != reflect.Slice {
+ return fmt.Errorf("expected a pointer to a slice, got %s: %w", dv.Kind(), mi.ErrInvalidEntityType)
+ }
+
+ elemType := dv.Type().Elem()
+
+ if elemType.Kind() != reflect.Struct {
+ return fmt.Errorf("expected a pointer to a slice of structs, got a slice of %s: %w", elemType.Kind(), mi.ErrInvalidEntityType)
+ }
+
+ if dv.Len() != 0 {
+ dv.Set(reflect.MakeSlice(dv.Type(), 0, 0))
+ }
+
+ dv.Clear()
+
+ elemValue := reflect.ValueOf(reflect.New(elemType).Interface()).Elem()
+
+ indexMap := map[string]int{}
+ stringMap := map[*uint16]string{}
+
+ for _, counter := range c.counters {
+ for _, instance := range counter.Instances {
+ // Get the info with the current buffer size
+ bytesNeeded = uint32(cap(buf))
+
+ for {
+ ret := GetRawCounterArray(instance, &bytesNeeded, &itemCount, &buf[0])
+
+ if ret == ErrorSuccess {
+ break
+ }
+
+ if err := NewPdhError(ret); ret != MoreData && !isKnownCounterDataError(err) {
+ return fmt.Errorf("GetRawCounterArray: %w", err)
+ }
+
+ if bytesNeeded <= uint32(cap(buf)) {
+ return fmt.Errorf("GetRawCounterArray reports buffer too small (%d), but buffer is large enough (%d): %w", uint32(cap(buf)), bytesNeeded, NewPdhError(ret))
+ }
+
+ buf = make([]byte, bytesNeeded)
+ }
+
+ items = unsafe.Slice((*RawCounterItem)(unsafe.Pointer(&buf[0])), itemCount)
+
+ var (
+ instanceName string
+ ok bool
+ )
+
+ for _, item := range items {
+ if item.RawValue.CStatus != CstatusValidData && item.RawValue.CStatus != CstatusNewData {
+ continue
+ }
+
+ if instanceName, ok = stringMap[item.SzName]; !ok {
+ instanceName = windows.UTF16PtrToString(item.SzName)
+ stringMap[item.SzName] = instanceName
+ }
+
+ if strings.HasSuffix(instanceName, InstanceTotal) && !c.totalCounterRequested {
+ continue
+ }
+
+ if instanceName == "" || instanceName == "*" {
+ instanceName = InstanceEmpty
+ }
+
+ var (
+ index int
+ ok bool
+ )
+
+ if index, ok = indexMap[instanceName]; !ok {
+ index = dv.Len()
+ indexMap[instanceName] = index
+
+ if c.nameIndexValue != -1 {
+ elemValue.Field(c.nameIndexValue).SetString(instanceName)
+ }
+
+ if c.metricsTypeIndexValue != -1 {
+ var metricsType prometheus.ValueType
+ if metricsType, ok = SupportedCounterTypes[counter.Type]; !ok {
+ metricsType = prometheus.GaugeValue
+ }
+
+ elemValue.Field(c.metricsTypeIndexValue).Set(reflect.ValueOf(metricsType))
+ }
+
+ dv.Set(reflect.Append(dv, elemValue))
+ }
+
+ // This is a workaround for the issue with the elapsed time counter type.
+ // Source: https://github.com/prometheus-community/windows_exporter/pull/335/files#diff-d5d2528f559ba2648c2866aec34b1eaa5c094dedb52bd0ff22aa5eb83226bd8dR76-R83
+ // Ref: https://learn.microsoft.com/en-us/windows/win32/perfctrs/calculating-counter-values
+ switch counter.Type {
+ case PERF_ELAPSED_TIME:
+ dv.Index(index).
+ Field(counter.FieldIndexValue).
+ SetFloat(float64((item.RawValue.FirstValue - WindowsEpoch) / counter.Frequency))
+ case PERF_100NSEC_TIMER, PERF_PRECISION_100NS_TIMER:
+ dv.Index(index).
+ Field(counter.FieldIndexValue).
+ SetFloat(float64(item.RawValue.FirstValue) * TicksToSecondScaleFactor)
+ default:
+ if counter.FieldIndexSecondValue != -1 {
+ dv.Index(index).
+ Field(counter.FieldIndexSecondValue).
+ SetFloat(float64(item.RawValue.SecondValue))
+ }
+
+ if counter.FieldIndexValue != -1 {
+ dv.Index(index).
+ Field(counter.FieldIndexValue).
+ SetFloat(float64(item.RawValue.FirstValue))
+ }
+ }
+ }
+ }
+ }
+
+ if dv.Len() == 0 {
+ return ErrNoData
+ }
+
+ return nil
+ })()
+
+ c.errorCh <- err
+ }
+}
+
+func (c *Collector) Close() {
+ if c == nil {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ CloseQuery(c.handle)
+
+ c.handle = 0
+
+ if c.collectCh != nil {
+ close(c.collectCh)
+ }
+
+ if c.errorCh != nil {
+ close(c.errorCh)
+ }
+
+ c.collectCh = nil
+ c.errorCh = nil
+}
+
+func formatCounterPath(object, instance, counterName string) string {
+ var counterPath string
+
+ if instance == InstanceEmpty {
+ counterPath = fmt.Sprintf(`\%s\%s`, object, counterName)
+ } else {
+ counterPath = fmt.Sprintf(`\%s(%s)\%s`, object, instance, counterName)
+ }
+
+ return counterPath
+}
+
+func isKnownCounterDataError(err error) bool {
+ var pdhErr *Error
+
+ return errors.As(err, &pdhErr) && (pdhErr.ErrorCode == InvalidData ||
+ pdhErr.ErrorCode == CalcNegativeDenominator ||
+ pdhErr.ErrorCode == CalcNegativeValue ||
+ pdhErr.ErrorCode == CstatusInvalidData ||
+ pdhErr.ErrorCode == CstatusNoInstance ||
+ pdhErr.ErrorCode == NoData)
+}
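
Editor's note: besides plain float64 counter fields, the collector above also recognizes an optional Name string field (set to the instance name), an optional MetricType field of type prometheus.ValueType (set from the detected PDH counter type), and a ",secondvalue" tag suffix that maps a field to RawValue.SecondValue. A hedged sketch of a struct using those hooks; the object and counter names are illustrative placeholders, not a claim about any real collector:

    // Sketch only: object and counter names are placeholders.
    type exampleValues struct {
        Name       string               // receives the instance name
        MetricType prometheus.ValueType // receives the detected PDH counter type

        ReadLatency     float64 `perfdata:"Avg. Disk sec/Read"`             // RawValue.FirstValue
        ReadLatencyBase float64 `perfdata:"Avg. Disk sec/Read,secondvalue"` // RawValue.SecondValue
    }

    // Usage follows the same shape as the test files that follow:
    //   c, err := pdh.NewCollector[exampleValues]("LogicalDisk", pdh.InstancesAll)
    //   var values []exampleValues
    //   err = c.Collect(&values)
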
diff --git a/internal/pdh/collector_bench_test.go b/internal/pdh/collector_bench_test.go
new file mode 100644
index 000000000..70d3ac9ba
--- /dev/null
+++ b/internal/pdh/collector_bench_test.go
@@ -0,0 +1,71 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package pdh_test
+
+import (
+ "testing"
+
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
+ "github.com/stretchr/testify/require"
+)
+
+type processFull struct {
+ Name string
+
+ ProcessorTime float64 `perfdata:"% Processor Time"`
+ PrivilegedTime float64 `perfdata:"% Privileged Time"`
+ UserTime float64 `perfdata:"% User Time"`
+ CreatingProcessID float64 `perfdata:"Creating Process ID"`
+ ElapsedTime float64 `perfdata:"Elapsed Time"`
+ HandleCount float64 `perfdata:"Handle Count"`
+ IDProcess float64 `perfdata:"ID Process"`
+ IODataBytesSec float64 `perfdata:"IO Data Bytes/sec"`
+ IODataOperationsSec float64 `perfdata:"IO Data Operations/sec"`
+ IOOtherBytesSec float64 `perfdata:"IO Other Bytes/sec"`
+ IOOtherOperationsSec float64 `perfdata:"IO Other Operations/sec"`
+ IOReadBytesSec float64 `perfdata:"IO Read Bytes/sec"`
+ IOReadOperationsSec float64 `perfdata:"IO Read Operations/sec"`
+ IOWriteBytesSec float64 `perfdata:"IO Write Bytes/sec"`
+ IOWriteOperationsSec float64 `perfdata:"IO Write Operations/sec"`
+ PageFaultsSec float64 `perfdata:"Page Faults/sec"`
+ PageFileBytesPeak float64 `perfdata:"Page File Bytes Peak"`
+ PageFileBytes float64 `perfdata:"Page File Bytes"`
+ PoolNonpagedBytes float64 `perfdata:"Pool Nonpaged Bytes"`
+ PoolPagedBytes float64 `perfdata:"Pool Paged Bytes"`
+ PriorityBase float64 `perfdata:"Priority Base"`
+ PrivateBytes float64 `perfdata:"Private Bytes"`
+ ThreadCount float64 `perfdata:"Thread Count"`
+ VirtualBytesPeak float64 `perfdata:"Virtual Bytes Peak"`
+ VirtualBytes float64 `perfdata:"Virtual Bytes"`
+ WorkingSetPrivate float64 `perfdata:"Working Set - Private"`
+ WorkingSetPeak float64 `perfdata:"Working Set Peak"`
+ WorkingSet float64 `perfdata:"Working Set"`
+}
+
+func BenchmarkTestCollector(b *testing.B) {
+ performanceData, err := pdh.NewCollector[processFull]("Process", []string{"*"})
+ require.NoError(b, err)
+
+ var data []processFull
+
+ for i := 0; i < b.N; i++ {
+ _ = performanceData.Collect(&data)
+ }
+
+ performanceData.Close()
+
+ b.ReportAllocs()
+}
diff --git a/internal/pdh/collector_test.go b/internal/pdh/collector_test.go
new file mode 100644
index 000000000..11259a10b
--- /dev/null
+++ b/internal/pdh/collector_test.go
@@ -0,0 +1,71 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package pdh_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type process struct {
+ Name string
+ ThreadCount float64 `perfdata:"Thread Count"`
+}
+
+func TestCollector(t *testing.T) {
+ t.Parallel()
+
+ for _, tc := range []struct {
+ object string
+ instances []string
+ }{
+ {
+ object: "Process",
+ instances: []string{"*"},
+ },
+ } {
+ t.Run(tc.object, func(t *testing.T) {
+ t.Parallel()
+
+ performanceData, err := pdh.NewCollector[process](tc.object, tc.instances)
+ require.NoError(t, err)
+
+ time.Sleep(100 * time.Millisecond)
+
+ var data []process
+
+ err = performanceData.Collect(&data)
+ require.NoError(t, err)
+ require.NotEmpty(t, data)
+
+ err = performanceData.Collect(&data)
+ require.NoError(t, err)
+ require.NotEmpty(t, data)
+
+ for _, instance := range data {
+ if instance.Name == "Idle" || instance.Name == "Secure System" {
+ continue
+ }
+
+ assert.NotZerof(t, instance.ThreadCount, "object: %s, instance: %s, thread count: %f", tc.object, instance.Name, instance.ThreadCount)
+ }
+ })
+ }
+}
diff --git a/internal/perfdata/const.go b/internal/pdh/const.go
similarity index 98%
rename from internal/perfdata/const.go
rename to internal/pdh/const.go
index 97278a228..d275f5fce 100644
--- a/internal/perfdata/const.go
+++ b/internal/pdh/const.go
@@ -13,7 +13,7 @@
//go:build windows
-package perfdata
+package pdh
import "github.com/prometheus/client_golang/prometheus"
@@ -71,7 +71,7 @@ const (
)
//nolint:gochecknoglobals
-var supportedCounterTypes = map[uint32]prometheus.ValueType{
+var SupportedCounterTypes = map[uint32]prometheus.ValueType{
PERF_COUNTER_RAWCOUNT_HEX: prometheus.GaugeValue,
PERF_COUNTER_LARGE_RAWCOUNT_HEX: prometheus.GaugeValue,
PERF_COUNTER_RAWCOUNT: prometheus.GaugeValue,
diff --git a/internal/perfdata/error.go b/internal/pdh/error.go
similarity index 91%
rename from internal/perfdata/error.go
rename to internal/pdh/error.go
index 35e3d111a..a5e3e210f 100644
--- a/internal/perfdata/error.go
+++ b/internal/pdh/error.go
@@ -13,12 +13,12 @@
//go:build windows
-package perfdata
+package pdh
import "errors"
var (
- ErrNoData = NewPdhError(PdhNoData)
+ ErrNoData = NewPdhError(NoData)
ErrPerformanceCounterNotInitialized = errors.New("performance counter not initialized")
)
@@ -48,6 +48,6 @@ func (m *Error) Error() string {
func NewPdhError(code uint32) error {
return &Error{
ErrorCode: code,
- errorText: PdhFormatError(code),
+ errorText: FormatError(code),
}
}
diff --git a/internal/pdh/pdh.go b/internal/pdh/pdh.go
new file mode 100644
index 000000000..6260d476c
--- /dev/null
+++ b/internal/pdh/pdh.go
@@ -0,0 +1,633 @@
+// Copyright (c) 2010-2024 The win Authors. All rights reserved.
+// Copyright (c) 2024 The prometheus-community Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// This is the official list of 'win' authors for copyright purposes.
+//
+// Alexander Neumann
+// Joseph Watson
+// Kevin Pors
+
+//go:build windows
+
+package pdh
+
+import (
+ "fmt"
+ "time"
+ "unsafe"
+
+ "github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
+ "golang.org/x/sys/windows"
+)
+
+// Error codes.
+const (
+ ErrorSuccess = 0
+ ErrorFailure = 1
+ ErrorInvalidFunction = 1
+)
+
+type (
+ HANDLE uintptr
+)
+
+// PDH error codes, which can be returned by all Pdh* functions. Taken from mingw-w64 pdhmsg.h
+
+const (
+ CstatusValidData uint32 = 0x00000000 // The returned data is valid.
+ CstatusNewData uint32 = 0x00000001 // The return data value is valid and different from the last sample.
+ CstatusNoMachine uint32 = 0x800007D0 // Unable to connect to the specified computer, or the computer is offline.
+ CstatusNoInstance uint32 = 0x800007D1
+ MoreData uint32 = 0x800007D2 // The PdhGetFormattedCounterArray* function can return this if there's 'more data to be displayed'.
+ CstatusItemNotValidated uint32 = 0x800007D3
+ Retry uint32 = 0x800007D4
+ NoData uint32 = 0x800007D5 // The query does not currently contain any counters (for example, limited access)
+ CalcNegativeDenominator uint32 = 0x800007D6
+ CalcNegativeTimebase uint32 = 0x800007D7
+ CalcNegativeValue uint32 = 0x800007D8
+ DialogCancelled uint32 = 0x800007D9
+ EndOfLogFile uint32 = 0x800007DA
+ AsyncQueryTimeout uint32 = 0x800007DB
+ CannotSetDefaultRealtimeDatasource uint32 = 0x800007DC
+ CstatusNoObject uint32 = 0xC0000BB8
+ CstatusNoCounter uint32 = 0xC0000BB9 // The specified counter could not be found.
+ CstatusInvalidData uint32 = 0xC0000BBA // The counter was successfully found, but the data returned is not valid.
+ MemoryAllocationFailure uint32 = 0xC0000BBB
+ InvalidHandle uint32 = 0xC0000BBC
+ InvalidArgument uint32 = 0xC0000BBD // Required argument is missing or incorrect.
+ FunctionNotFound uint32 = 0xC0000BBE
+ CstatusNoCountername uint32 = 0xC0000BBF
+ CstatusBadCountername uint32 = 0xC0000BC0 // Unable to parse the counter path. Check the format and syntax of the specified path.
+ InvalidBuffer uint32 = 0xC0000BC1
+ InsufficientBuffer uint32 = 0xC0000BC2
+ CannotConnectMachine uint32 = 0xC0000BC3
+ InvalidPath uint32 = 0xC0000BC4
+ InvalidInstance uint32 = 0xC0000BC5
+ InvalidData uint32 = 0xC0000BC6 // specified counter does not contain valid data or a successful status code.
+ NoDialogData uint32 = 0xC0000BC7
+ CannotReadNameStrings uint32 = 0xC0000BC8
+ LogFileCreateError uint32 = 0xC0000BC9
+ LogFileOpenError uint32 = 0xC0000BCA
+ LogTypeNotFound uint32 = 0xC0000BCB
+ NoMoreData uint32 = 0xC0000BCC
+ EntryNotInLogFile uint32 = 0xC0000BCD
+ DataSourceIsLogFile uint32 = 0xC0000BCE
+ DataSourceIsRealTime uint32 = 0xC0000BCF
+ UnableReadLogHeader uint32 = 0xC0000BD0
+ FileNotFound uint32 = 0xC0000BD1
+ FileAlreadyExists uint32 = 0xC0000BD2
+ NotImplemented uint32 = 0xC0000BD3
+ StringNotFound uint32 = 0xC0000BD4
+ UnableMapNameFiles uint32 = 0x80000BD5
+ UnknownLogFormat uint32 = 0xC0000BD6
+ UnknownLogsvcCommand uint32 = 0xC0000BD7
+ LogsvcQueryNotFound uint32 = 0xC0000BD8
+ LogsvcNotOpened uint32 = 0xC0000BD9
+ WbemError uint32 = 0xC0000BDA
+ AccessDenied uint32 = 0xC0000BDB
+ LogFileTooSmall uint32 = 0xC0000BDC
+ InvalidDatasource uint32 = 0xC0000BDD
+ InvalidSqldb uint32 = 0xC0000BDE
+ NoCounters uint32 = 0xC0000BDF
+ SQLAllocFailed uint32 = 0xC0000BE0
+ SQLAllocconFailed uint32 = 0xC0000BE1
+ SQLExecDirectFailed uint32 = 0xC0000BE2
+ SQLFetchFailed uint32 = 0xC0000BE3
+ SQLRowcountFailed uint32 = 0xC0000BE4
+ SQLMoreResultsFailed uint32 = 0xC0000BE5
+ SQLConnectFailed uint32 = 0xC0000BE6
+ SQLBindFailed uint32 = 0xC0000BE7
+ CannotConnectWmiServer uint32 = 0xC0000BE8
+ PlaCollectionAlreadyRunning uint32 = 0xC0000BE9
+ PlaErrorScheduleOverlap uint32 = 0xC0000BEA
+ PlaCollectionNotFound uint32 = 0xC0000BEB
+ PlaErrorScheduleElapsed uint32 = 0xC0000BEC
+ PlaErrorNostart uint32 = 0xC0000BED
+ PlaErrorAlreadyExists uint32 = 0xC0000BEE
+ PlaErrorTypeMismatch uint32 = 0xC0000BEF
+ PlaErrorFilepath uint32 = 0xC0000BF0
+ PlaServiceError uint32 = 0xC0000BF1
+ PlaValidationError uint32 = 0xC0000BF2
+ PlaValidationWarning uint32 = 0x80000BF3
+ PlaErrorNameTooLong uint32 = 0xC0000BF4
+ InvalidSQLLogFormat uint32 = 0xC0000BF5
+ CounterAlreadyInQuery uint32 = 0xC0000BF6
+ BinaryLogCorrupt uint32 = 0xC0000BF7
+ LogSampleTooSmall uint32 = 0xC0000BF8
+ OsLaterVersion uint32 = 0xC0000BF9
+ OsEarlierVersion uint32 = 0xC0000BFA
+ IncorrectAppendTime uint32 = 0xC0000BFB
+ UnmatchedAppendCounter uint32 = 0xC0000BFC
+ SQLAlterDetailFailed uint32 = 0xC0000BFD
+ QueryPerfDataTimeout uint32 = 0xC0000BFE
+)
+
+//nolint:gochecknoglobals
+var Errors = map[uint32]string{
+ CstatusValidData: "PDH_CSTATUS_VALID_DATA",
+ CstatusNewData: "PDH_CSTATUS_NEW_DATA",
+ CstatusNoMachine: "PDH_CSTATUS_NO_MACHINE",
+ CstatusNoInstance: "PDH_CSTATUS_NO_INSTANCE",
+ MoreData: "PDH_MORE_DATA",
+ CstatusItemNotValidated: "PDH_CSTATUS_ITEM_NOT_VALIDATED",
+ Retry: "PDH_RETRY",
+ NoData: "PDH_NO_DATA",
+ CalcNegativeDenominator: "PDH_CALC_NEGATIVE_DENOMINATOR",
+ CalcNegativeTimebase: "PDH_CALC_NEGATIVE_TIMEBASE",
+ CalcNegativeValue: "PDH_CALC_NEGATIVE_VALUE",
+ DialogCancelled: "PDH_DIALOG_CANCELLED",
+ EndOfLogFile: "PDH_END_OF_LOG_FILE",
+ AsyncQueryTimeout: "PDH_ASYNC_QUERY_TIMEOUT",
+ CannotSetDefaultRealtimeDatasource: "PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE",
+ CstatusNoObject: "PDH_CSTATUS_NO_OBJECT",
+ CstatusNoCounter: "PDH_CSTATUS_NO_COUNTER",
+ CstatusInvalidData: "PDH_CSTATUS_INVALID_DATA",
+ MemoryAllocationFailure: "PDH_MEMORY_ALLOCATION_FAILURE",
+ InvalidHandle: "PDH_INVALID_HANDLE",
+ InvalidArgument: "PDH_INVALID_ARGUMENT",
+ FunctionNotFound: "PDH_FUNCTION_NOT_FOUND",
+ CstatusNoCountername: "PDH_CSTATUS_NO_COUNTERNAME",
+ CstatusBadCountername: "PDH_CSTATUS_BAD_COUNTERNAME",
+ InvalidBuffer: "PDH_INVALID_BUFFER",
+ InsufficientBuffer: "PDH_INSUFFICIENT_BUFFER",
+ CannotConnectMachine: "PDH_CANNOT_CONNECT_MACHINE",
+ InvalidPath: "PDH_INVALID_PATH",
+ InvalidInstance: "PDH_INVALID_INSTANCE",
+ InvalidData: "PDH_INVALID_DATA",
+ NoDialogData: "PDH_NO_DIALOG_DATA",
+ CannotReadNameStrings: "PDH_CANNOT_READ_NAME_STRINGS",
+ LogFileCreateError: "PDH_LOG_FILE_CREATE_ERROR",
+ LogFileOpenError: "PDH_LOG_FILE_OPEN_ERROR",
+ LogTypeNotFound: "PDH_LOG_TYPE_NOT_FOUND",
+ NoMoreData: "PDH_NO_MORE_DATA",
+ EntryNotInLogFile: "PDH_ENTRY_NOT_IN_LOG_FILE",
+ DataSourceIsLogFile: "PDH_DATA_SOURCE_IS_LOG_FILE",
+ DataSourceIsRealTime: "PDH_DATA_SOURCE_IS_REAL_TIME",
+ UnableReadLogHeader: "PDH_UNABLE_READ_LOG_HEADER",
+ FileNotFound: "PDH_FILE_NOT_FOUND",
+ FileAlreadyExists: "PDH_FILE_ALREADY_EXISTS",
+ NotImplemented: "PDH_NOT_IMPLEMENTED",
+ StringNotFound: "PDH_STRING_NOT_FOUND",
+ UnableMapNameFiles: "PDH_UNABLE_MAP_NAME_FILES",
+ UnknownLogFormat: "PDH_UNKNOWN_LOG_FORMAT",
+ UnknownLogsvcCommand: "PDH_UNKNOWN_LOGSVC_COMMAND",
+ LogsvcQueryNotFound: "PDH_LOGSVC_QUERY_NOT_FOUND",
+ LogsvcNotOpened: "PDH_LOGSVC_NOT_OPENED",
+ WbemError: "PDH_WBEM_ERROR",
+ AccessDenied: "PDH_ACCESS_DENIED",
+ LogFileTooSmall: "PDH_LOG_FILE_TOO_SMALL",
+ InvalidDatasource: "PDH_INVALID_DATASOURCE",
+ InvalidSqldb: "PDH_INVALID_SQLDB",
+ NoCounters: "PDH_NO_COUNTERS",
+ SQLAllocFailed: "PDH_SQL_ALLOC_FAILED",
+ SQLAllocconFailed: "PDH_SQL_ALLOCCON_FAILED",
+ SQLExecDirectFailed: "PDH_SQL_EXEC_DIRECT_FAILED",
+ SQLFetchFailed: "PDH_SQL_FETCH_FAILED",
+ SQLRowcountFailed: "PDH_SQL_ROWCOUNT_FAILED",
+ SQLMoreResultsFailed: "PDH_SQL_MORE_RESULTS_FAILED",
+ SQLConnectFailed: "PDH_SQL_CONNECT_FAILED",
+ SQLBindFailed: "PDH_SQL_BIND_FAILED",
+ CannotConnectWmiServer: "PDH_CANNOT_CONNECT_WMI_SERVER",
+ PlaCollectionAlreadyRunning: "PDH_PLA_COLLECTION_ALREADY_RUNNING",
+ PlaErrorScheduleOverlap: "PDH_PLA_ERROR_SCHEDULE_OVERLAP",
+ PlaCollectionNotFound: "PDH_PLA_COLLECTION_NOT_FOUND",
+ PlaErrorScheduleElapsed: "PDH_PLA_ERROR_SCHEDULE_ELAPSED",
+ PlaErrorNostart: "PDH_PLA_ERROR_NOSTART",
+ PlaErrorAlreadyExists: "PDH_PLA_ERROR_ALREADY_EXISTS",
+ PlaErrorTypeMismatch: "PDH_PLA_ERROR_TYPE_MISMATCH",
+ PlaErrorFilepath: "PDH_PLA_ERROR_FILEPATH",
+ PlaServiceError: "PDH_PLA_SERVICE_ERROR",
+ PlaValidationError: "PDH_PLA_VALIDATION_ERROR",
+ PlaValidationWarning: "PDH_PLA_VALIDATION_WARNING",
+ PlaErrorNameTooLong: "PDH_PLA_ERROR_NAME_TOO_LONG",
+ InvalidSQLLogFormat: "PDH_INVALID_SQL_LOG_FORMAT",
+ CounterAlreadyInQuery: "PDH_COUNTER_ALREADY_IN_QUERY",
+ BinaryLogCorrupt: "PDH_BINARY_LOG_CORRUPT",
+ LogSampleTooSmall: "PDH_LOG_SAMPLE_TOO_SMALL",
+ OsLaterVersion: "PDH_OS_LATER_VERSION",
+ OsEarlierVersion: "PDH_OS_EARLIER_VERSION",
+ IncorrectAppendTime: "PDH_INCORRECT_APPEND_TIME",
+ UnmatchedAppendCounter: "PDH_UNMATCHED_APPEND_COUNTER",
+ SQLAlterDetailFailed: "PDH_SQL_ALTER_DETAIL_FAILED",
+ QueryPerfDataTimeout: "PDH_QUERY_PERF_DATA_TIMEOUT",
+}
+
+// Formatting options for GetFormattedCounterValue().
+//
+//goland:noinspection GoUnusedConst
+const (
+ FmtRaw = 0x00000010
+ FmtAnsi = 0x00000020
+ FmtUnicode = 0x00000040
+ FmtLong = 0x00000100 // Return data as a long int.
+ FmtDouble = 0x00000200 // Return data as a double precision floating point real.
+ FmtLarge = 0x00000400 // Return data as a 64 bit integer.
+ FmtNoscale = 0x00001000 // can be OR-ed: Do not apply the counter's default scaling factor.
+ Fmt1000 = 0x00002000 // can be OR-ed: multiply the actual value by 1,000.
+ FmtNodata = 0x00004000 // can be OR-ed: unknown what this is for, MSDN says nothing.
+ FmtNocap100 = 0x00008000 // can be OR-ed: do not cap values > 100.
+ PerfDetailCostly = 0x00010000
+ PerfDetailStandard = 0x0000FFFF
+)
+
+type (
+ pdhQueryHandle HANDLE // query handle
+ pdhCounterHandle HANDLE // counter handle
+)
+
+//nolint:gochecknoglobals
+var (
+ libPdhDll = windows.NewLazySystemDLL("pdh.dll")
+
+ pdhAddCounterW = libPdhDll.NewProc("PdhAddCounterW")
+ pdhAddEnglishCounterW = libPdhDll.NewProc("PdhAddEnglishCounterW")
+ pdhCloseQuery = libPdhDll.NewProc("PdhCloseQuery")
+ pdhCollectQueryData = libPdhDll.NewProc("PdhCollectQueryData")
+ pdhCollectQueryDataWithTime = libPdhDll.NewProc("PdhCollectQueryDataWithTime")
+ pdhGetFormattedCounterValue = libPdhDll.NewProc("PdhGetFormattedCounterValue")
+ pdhGetFormattedCounterArrayW = libPdhDll.NewProc("PdhGetFormattedCounterArrayW")
+ pdhOpenQuery = libPdhDll.NewProc("PdhOpenQuery")
+ pdhValidatePathW = libPdhDll.NewProc("PdhValidatePathW")
+ pdhExpandWildCardPathW = libPdhDll.NewProc("PdhExpandWildCardPathW")
+ pdhGetCounterInfoW = libPdhDll.NewProc("PdhGetCounterInfoW")
+ pdhGetRawCounterValue = libPdhDll.NewProc("PdhGetRawCounterValue")
+ pdhGetRawCounterArrayW = libPdhDll.NewProc("PdhGetRawCounterArrayW")
+ pdhPdhGetCounterTimeBase = libPdhDll.NewProc("PdhGetCounterTimeBase")
+)
+
+// AddCounter adds the specified counter to the query. This is the internationalized version. Preferably, use the
+// function AddEnglishCounter instead. hQuery is the query handle, which has been fetched by OpenQuery.
+// szFullCounterPath is a full, internationalized counter path (this will differ per Windows language version).
+// dwUserData is a 'user-defined value', which becomes part of the counter information. To retrieve this value
+// later, call GetCounterInfo() and access dwQueryUserData of the CounterInfo structure.
+//
+// Examples of szFullCounterPath (in an English version of Windows):
+//
+// \\Processor(_Total)\\% Idle Time
+// \\Processor(_Total)\\% Processor Time
+// \\LogicalDisk(C:)\% Free Space
+//
+// To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility,
+// the typeperf command, and the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a
+// full implementation of the pdh.dll API, except with a GUI and all that. The registry setting also provides an
+// interface to the available counters, and can be found at the following key:
+//
+// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage
+//
+// This registry key contains several values as follows:
+//
+// 1
+// 1847
+// 2
+// System
+// 4
+// Memory
+// 6
+// % Processor Time
+// ... many, many more
+//
+// Somehow, these numeric values can be used as szFullCounterPath too:
+//
+// \2\6 will correspond to \\System\% Processor Time
+//
+// The typeperf command may also be pretty easy. To find all performance counters, simply execute:
+//
+// typeperf -qx
+func AddCounter(hQuery pdhQueryHandle, szFullCounterPath string, dwUserData uintptr, phCounter *pdhCounterHandle) uint32 {
+ ptxt, _ := windows.UTF16PtrFromString(szFullCounterPath)
+ ret, _, _ := pdhAddCounterW.Call(
+ uintptr(hQuery),
+ uintptr(unsafe.Pointer(ptxt)),
+ dwUserData,
+ uintptr(unsafe.Pointer(phCounter)))
+
+ return uint32(ret)
+}
+
+// AddEnglishCounter adds the specified language-neutral counter to the query. See the AddCounter function. This function only exists on
+// Windows versions higher than Vista.
+func AddEnglishCounter(hQuery pdhQueryHandle, szFullCounterPath string, dwUserData uintptr, phCounter *pdhCounterHandle) uint32 {
+ if pdhAddEnglishCounterW == nil {
+ return ErrorInvalidFunction
+ }
+
+ ptxt, _ := windows.UTF16PtrFromString(szFullCounterPath)
+ ret, _, _ := pdhAddEnglishCounterW.Call(
+ uintptr(hQuery),
+ uintptr(unsafe.Pointer(ptxt)),
+ dwUserData,
+ uintptr(unsafe.Pointer(phCounter)))
+
+ return uint32(ret)
+}
+
+// CloseQuery closes all counters contained in the specified query, closes all handles related to the query,
+// and frees all memory associated with the query.
+func CloseQuery(hQuery pdhQueryHandle) uint32 {
+ ret, _, _ := pdhCloseQuery.Call(uintptr(hQuery))
+
+ return uint32(ret)
+}
+
+// CollectQueryData collects the current raw data value for all counters in the specified query and updates the status
+// code of each counter. With some counters, this function needs to be repeatedly called before the value
+// of the counter can be extracted with PdhGetFormattedCounterValue(). For example, the following code
+// requires at least two calls:
+//
+// var handle win.PDH_HQUERY
+// var counterHandle win.PDH_HCOUNTER
+// ret := win.OpenQuery(0, 0, &handle)
+// ret = win.AddEnglishCounter(handle, "\\Processor(_Total)\\% Idle Time", 0, &counterHandle)
+// var derp win.PDH_FMT_COUNTERVALUE_DOUBLE
+//
+// ret = win.CollectQueryData(handle)
+// fmt.Printf("Collect return code is %x\n", ret) // return code will be PDH_CSTATUS_INVALID_DATA
+// ret = win.GetFormattedCounterValueDouble(counterHandle, 0, &derp)
+//
+// ret = win.CollectQueryData(handle)
+// fmt.Printf("Collect return code is %x\n", ret) // return code will be ERROR_SUCCESS
+// ret = win.GetFormattedCounterValueDouble(counterHandle, 0, &derp)
+//
+// The CollectQueryData will return an error in the first call because it needs two values for
+// displaying the correct data for the processor idle time. The second call will have a 0 return code.
+func CollectQueryData(hQuery pdhQueryHandle) uint32 {
+ ret, _, _ := pdhCollectQueryData.Call(uintptr(hQuery))
+
+ return uint32(ret)
+}
+
+// CollectQueryDataWithTime queries data from perfmon, retrieving the device/windows timestamp from the node it was collected on.
+// Converts the filetime structure to a GO time class and returns the native time.
+func CollectQueryDataWithTime(hQuery pdhQueryHandle) (uint32, time.Time) {
+ var localFileTime windows.Filetime
+
+ ret, _, _ := pdhCollectQueryDataWithTime.Call(uintptr(hQuery), uintptr(unsafe.Pointer(&localFileTime)))
+
+ if ret == ErrorSuccess {
+ var utcFileTime windows.Filetime
+
+ if ret := kernel32.LocalFileTimeToFileTime(&localFileTime, &utcFileTime); ret == 0 {
+ return uint32(ErrorFailure), time.Now()
+ }
+
+ retTime := time.Unix(0, utcFileTime.Nanoseconds())
+
+ return uint32(ErrorSuccess), retTime
+ }
+
+ return uint32(ret), time.Now()
+}
+
+// GetFormattedCounterValueDouble formats the given hCounter using a 'double'. The result is set into the specialized union struct pValue.
+// This function does not directly translate to a Windows counterpart due to union specialization tricks.
+func GetFormattedCounterValueDouble(hCounter pdhCounterHandle, lpdwType *uint32, pValue *FmtCounterValueDouble) uint32 {
+ ret, _, _ := pdhGetFormattedCounterValue.Call(
+ uintptr(hCounter),
+ uintptr(FmtDouble|FmtNocap100),
+ uintptr(unsafe.Pointer(lpdwType)),
+ uintptr(unsafe.Pointer(pValue)))
+
+ return uint32(ret)
+}
+
+// GetFormattedCounterArrayDouble returns an array of formatted counter values. Use this function when you want to format the counter values of a
+// counter that contains a wildcard character for the instance name. The itemBuffer must be a slice of type FmtCounterValueItemDouble.
+// An example of how this function can be used:
+//
+// okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character
+//
+// // omitted all necessary stuff ...
+//
+// var bufSize uint32
+// var bufCount uint32
+// var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
+// var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
+//
+// for {
+// // collect
+// ret := win.CollectQueryData(queryHandle)
+// if ret == win.ERROR_SUCCESS {
+// ret = win.GetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
+// if ret == win.PDH_MORE_DATA {
+// filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
+// ret = win.GetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &filledBuf[0])
+// for i := 0; i < int(bufCount); i++ {
+// c := filledBuf[i]
+// var s string = win.UTF16PtrToString(c.SzName)
+// fmt.Printf("Index %d -> %s, value %v\n", i, s, c.FmtValue.DoubleValue)
+// }
+//
+// filledBuf = nil
+// // Need to at least set bufSize to zero, because if not, the function will not
+// // return PDH_MORE_DATA and will not set the bufSize.
+// bufCount = 0
+// bufSize = 0
+// }
+//
+// time.Sleep(2000 * time.Millisecond)
+// }
+// }
+func GetFormattedCounterArrayDouble(hCounter pdhCounterHandle, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *byte) uint32 {
+ ret, _, _ := pdhGetFormattedCounterArrayW.Call(
+ uintptr(hCounter),
+ uintptr(FmtDouble|FmtNocap100),
+ uintptr(unsafe.Pointer(lpdwBufferSize)),
+ uintptr(unsafe.Pointer(lpdwBufferCount)),
+ uintptr(unsafe.Pointer(itemBuffer)))
+
+ return uint32(ret)
+}
+
+// OpenQuery creates a new query that is used to manage the collection of performance data.
+// szDataSource is a null terminated string that specifies the name of the log file from which to
+// retrieve the performance data. If 0, performance data is collected from a real-time data source.
+// dwUserData is a user-defined value to associate with this query. To retrieve the user data later,
+// call GetCounterInfo and access dwQueryUserData of the CounterInfo structure. phQuery is
+// the handle to the query, and must be used in subsequent calls. This function returns a PDH_
+// constant error code, or ErrorSuccess if the call succeeded.
+func OpenQuery(szDataSource uintptr, dwUserData uintptr, phQuery *pdhQueryHandle) uint32 {
+ ret, _, _ := pdhOpenQuery.Call(
+ szDataSource,
+ dwUserData,
+ uintptr(unsafe.Pointer(phQuery)))
+
+ return uint32(ret)
+}
+
+// ExpandWildCardPath examines the specified computer or log file and returns those counter paths that match the given counter path
+// which contains wildcard characters. The general counter path format is as follows:
+//
+// \\computer\object(parent/instance#index)\counter
+//
+// The parent, instance, index, and counter components of the counter path may contain either a valid name or a wildcard character.
+// The computer, parent, instance, and index components are not necessary for all counters.
+//
+// The following is a list of the possible formats:
+//
+// \\computer\object(parent/instance#index)\counter
+// \\computer\object(parent/instance)\counter
+// \\computer\object(instance#index)\counter
+// \\computer\object(instance)\counter
+// \\computer\object\counter
+// \object(parent/instance#index)\counter
+// \object(parent/instance)\counter
+// \object(instance#index)\counter
+// \object(instance)\counter
+// \object\counter
+// Use an asterisk (*) as the wildcard character, for example, \object(*)\counter.
+//
+// If a wildcard character is specified in the parent name, all instances of the specified object
+// that match the specified instance and counter fields will be returned.
+// For example, \object(*/instance)\counter.
+//
+// If a wildcard character is specified in the instance name, all instances of the specified object and parent object will be returned if all instance names
+// corresponding to the specified index match the wildcard character. For example, \object(parent/*)\counter.
+// If the object does not contain an instance, an error occurs.
+//
+// If a wildcard character is specified in the counter name, all counters of the specified object are returned.
+//
+// Partial counter path string matches (for example, "pro*") are supported.
+func ExpandWildCardPath(szWildCardPath string, mszExpandedPathList *uint16, pcchPathListLength *uint32) uint32 {
+ ptxt, _ := windows.UTF16PtrFromString(szWildCardPath)
+ flags := uint32(0) // expand instances and counters
+ ret, _, _ := pdhExpandWildCardPathW.Call(
+ 0, // search counters on local computer
+ uintptr(unsafe.Pointer(ptxt)),
+ uintptr(unsafe.Pointer(mszExpandedPathList)),
+ uintptr(unsafe.Pointer(pcchPathListLength)),
+ uintptr(unsafe.Pointer(&flags)))
+
+ return uint32(ret)
+}
+
+// ValidatePath validates a path. Will return ErrorSuccess when ok, or CstatusBadCountername when the path is erroneous.
+func ValidatePath(path string) uint32 {
+ ptxt, _ := windows.UTF16PtrFromString(path)
+ ret, _, _ := pdhValidatePathW.Call(uintptr(unsafe.Pointer(ptxt)))
+
+ return uint32(ret)
+}
+
+func FormatError(msgID uint32) string {
+ var flags uint32 = windows.FORMAT_MESSAGE_FROM_HMODULE | windows.FORMAT_MESSAGE_ARGUMENT_ARRAY | windows.FORMAT_MESSAGE_IGNORE_INSERTS
+
+ buf := make([]uint16, 300)
+ _, err := windows.FormatMessage(flags, libPdhDll.Handle(), msgID, 0, buf, nil)
+
+ if err == nil {
+ return windows.UTF16PtrToString(&buf[0])
+ }
+
+ return fmt.Sprintf("(pdhErr=%d) %s", msgID, err.Error())
+}
+
+// GetCounterInfo retrieves information about a counter, such as data size, counter type, path, and user-supplied data values
+// hCounter [in]
+// Handle of the counter from which you want to retrieve information. The AddCounter function returns this handle.
+//
+// bRetrieveExplainText [in]
+// Determines whether explain text is retrieved. If you set this parameter to TRUE, the explain text for the counter is retrieved.
+// If you set this parameter to FALSE, the field in the returned buffer is NULL.
+//
+// pdwBufferSize [in, out]
+// Size of the lpBuffer buffer, in bytes. If zero on input, the function returns MoreData and sets this parameter to the required buffer size.
+// If the buffer is larger than the required size, the function sets this parameter to the actual size of the buffer that was used.
+// If the specified size on input is greater than zero but less than the required size, you should not rely on the returned size to reallocate the buffer.
+//
+// lpBuffer [out]
+// Caller-allocated buffer that receives a CounterInfo structure.
+// The structure is variable-length, because the string data is appended to the end of the fixed-format portion of the structure.
+// This is done so that all data is returned in a single buffer allocated by the caller. Set to NULL if pdwBufferSize is zero.
+func GetCounterInfo(hCounter pdhCounterHandle, bRetrieveExplainText int, pdwBufferSize *uint32, lpBuffer *byte) uint32 {
+ ret, _, _ := pdhGetCounterInfoW.Call(
+ uintptr(hCounter),
+ uintptr(bRetrieveExplainText),
+ uintptr(unsafe.Pointer(pdwBufferSize)),
+ uintptr(unsafe.Pointer(lpBuffer)))
+
+ return uint32(ret)
+}
+
+// GetRawCounterValue returns the current raw value of the counter.
+// If the specified counter instance does not exist, this function will return ErrorSuccess
+// and the CStatus member of the RawCounter structure will contain CstatusNoInstance.
+//
+// hCounter [in]
+// Handle of the counter from which to retrieve the current raw value. The AddCounter function returns this handle.
+//
+// lpdwType [out]
+// Receives the counter type. For a list of counter types, see the Counter Types section of the Windows Server 2003 Deployment Kit.
+// This parameter is optional.
+//
+// pValue [out]
+// A RawCounter structure that receives the counter value.
+func GetRawCounterValue(hCounter pdhCounterHandle, lpdwType *uint32, pValue *RawCounter) uint32 {
+ ret, _, _ := pdhGetRawCounterValue.Call(
+ uintptr(hCounter),
+ uintptr(unsafe.Pointer(lpdwType)),
+ uintptr(unsafe.Pointer(pValue)))
+
+ return uint32(ret)
+}
+
+// GetRawCounterArray returns an array of raw values from the specified counter. Use this function when you want to retrieve the raw counter values
+// of a counter that contains a wildcard character for the instance name.
+// hCounter
+// Handle of the counter whose current raw instance values you want to retrieve. The AddCounter function returns this handle.
+//
+// lpdwBufferSize
+// Size of the ItemBuffer buffer, in bytes. If zero on input, the function returns PdhMoreData and sets this parameter to the required buffer size.
+// If the buffer is larger than the required size, the function sets this parameter to the actual size of the buffer that was used.
+// If the specified size on input is greater than zero but less than the required size, you should not rely on the returned size to reallocate the buffer.
+//
+// lpdwItemCount
+// Number of raw counter values in the ItemBuffer buffer.
+//
+// ItemBuffer
+// Caller-allocated buffer that receives the array of RawCounterItem structures; the structures contain the raw instance counter values.
+// Set to NULL if lpdwBufferSize is zero.
+func GetRawCounterArray(hCounter pdhCounterHandle, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *byte) uint32 {
+ ret, _, _ := pdhGetRawCounterArrayW.Call(
+ uintptr(hCounter),
+ uintptr(unsafe.Pointer(lpdwBufferSize)),
+ uintptr(unsafe.Pointer(lpdwBufferCount)),
+ uintptr(unsafe.Pointer(itemBuffer)))
+
+ return uint32(ret)
+}
+
+// GetCounterTimeBase returns the time base of the specified counter.
+// hCounter
+// Handle of the counter whose time base you want to retrieve. The AddCounter function returns this handle.
+//
+// pTimeBase
+// Receives the time base, that is, the number of performance values the counter samples per second.
+func GetCounterTimeBase(hCounter pdhCounterHandle, pTimeBase *int64) uint32 {
+ ret, _, _ := pdhPdhGetCounterTimeBase.Call(
+ uintptr(hCounter),
+ uintptr(unsafe.Pointer(pTimeBase)))
+
+ return uint32(ret)
+}
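
The zero-size probe described for GetCounterInfo above is the usual two-call PDH buffer pattern: call once with pdwBufferSize set to zero to learn the required size, allocate, then call again. A minimal sketch of that dance, assuming a valid counter handle and that this package keeps the ErrorSuccess and PdhMoreData status constants from the former perfdata package; counterTypeOf is a hypothetical helper, not part of this change:

    // counterTypeOf resolves the PDH counter type for an existing counter handle.
    // Sketch only: errors are reduced to returning the raw PDH status code.
    func counterTypeOf(hCounter pdhCounterHandle) (uint32, uint32) {
        // First call with a nil buffer: PdhMoreData tells us the required size.
        var bufLen uint32
        if ret := GetCounterInfo(hCounter, 0, &bufLen, nil); ret != PdhMoreData {
            return 0, ret
        }

        // Second call with a correctly sized buffer.
        buf := make([]byte, bufLen)
        if ret := GetCounterInfo(hCounter, 0, &bufLen, &buf[0]); ret != ErrorSuccess {
            return 0, ret
        }

        // The fixed-format CounterInfo header sits at the start of the buffer;
        // the variable-length strings follow it.
        info := (*CounterInfo)(unsafe.Pointer(&buf[0]))

        return info.DwType, ErrorSuccess
    }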
diff --git a/internal/pdh/registry/LICENSE b/internal/pdh/registry/LICENSE
new file mode 100644
index 000000000..169f2ab94
--- /dev/null
+++ b/internal/pdh/registry/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2018 Leopold Schabel / The perflib_exporter authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/internal/pdh/registry/collector.go b/internal/pdh/registry/collector.go
new file mode 100644
index 000000000..6c75341d7
--- /dev/null
+++ b/internal/pdh/registry/collector.go
@@ -0,0 +1,178 @@
+package registry
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/prometheus-community/windows_exporter/internal/mi"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
+)
+
+type Collector struct {
+ object string
+ query string
+
+ counters map[string]Counter
+ nameIndexValue int
+}
+
+type Counter struct {
+ Name string
+ Desc string
+ Instances map[string]uint32
+ Type uint32
+ Frequency float64
+
+ FieldIndexValue int
+ FieldIndexSecondValue int
+}
+
+func NewCollector[T any](object string, _ []string) (*Collector, error) {
+ collector := &Collector{
+ object: object,
+ query: MapCounterToIndex(object),
+ nameIndexValue: -1,
+ counters: make(map[string]Counter),
+ }
+
+ var values [0]T
+ valueType := reflect.TypeOf(values).Elem()
+
+ if f, ok := valueType.FieldByName("Name"); ok {
+ if f.Type.Kind() == reflect.String {
+ collector.nameIndexValue = f.Index[0]
+ }
+ }
+
+ for _, f := range reflect.VisibleFields(valueType) {
+ counterName, ok := f.Tag.Lookup("perfdata")
+ if !ok {
+ continue
+ }
+
+ var counter Counter
+ if counter, ok = collector.counters[counterName]; !ok {
+ counter = Counter{
+ Name: counterName,
+ FieldIndexSecondValue: -1,
+ FieldIndexValue: -1,
+ }
+ }
+
+ if strings.HasSuffix(counterName, ",secondvalue") {
+ counterName = strings.TrimSuffix(counterName, ",secondvalue")
+
+ counter.FieldIndexSecondValue = f.Index[0]
+ } else {
+ counter.FieldIndexValue = f.Index[0]
+ }
+
+ collector.counters[counterName] = counter
+ }
+
+ var collectValues []T
+
+ if err := collector.Collect(&collectValues); err != nil {
+ return nil, fmt.Errorf("failed to collect initial data: %w", err)
+ }
+
+ return collector, nil
+}
+
+func (c *Collector) Describe() map[string]string {
+ return map[string]string{}
+}
+
+func (c *Collector) Collect(data any) error {
+ dv := reflect.ValueOf(data)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return mi.ErrInvalidEntityType
+ }
+
+ dv = dv.Elem()
+
+ elemType := dv.Type().Elem()
+ elemValue := reflect.ValueOf(reflect.New(elemType).Interface()).Elem()
+
+ if dv.Kind() != reflect.Slice || elemType.Kind() != reflect.Struct {
+ return mi.ErrInvalidEntityType
+ }
+
+ perfObjects, err := QueryPerformanceData(c.query, c.object)
+ if err != nil {
+ return fmt.Errorf("QueryPerformanceData: %w", err)
+ }
+
+ if len(perfObjects) == 0 || perfObjects[0] == nil || len(perfObjects[0].Instances) == 0 {
+ return nil
+ }
+
+ if dv.Len() != 0 {
+ dv.Set(reflect.MakeSlice(dv.Type(), 0, len(perfObjects[0].Instances)))
+ }
+
+ dv.Clear()
+
+ for _, perfObject := range perfObjects {
+ if perfObject.Name != c.object {
+ continue
+ }
+
+ for _, perfInstance := range perfObject.Instances {
+ instanceName := perfInstance.Name
+ if strings.HasSuffix(instanceName, "_Total") {
+ continue
+ }
+
+ if instanceName == "" || instanceName == "*" {
+ instanceName = pdh.InstanceEmpty
+ }
+
+ if c.nameIndexValue != -1 {
+ elemValue.Field(c.nameIndexValue).SetString(instanceName)
+ }
+
+ dv.Set(reflect.Append(dv, elemValue))
+ index := dv.Len() - 1
+
+ for _, perfCounter := range perfInstance.Counters {
+ if perfCounter.Def.IsBaseValue && !perfCounter.Def.IsNanosecondCounter {
+ continue
+ }
+
+ counter, ok := c.counters[perfCounter.Def.Name]
+ if !ok {
+ continue
+ }
+
+ switch perfCounter.Def.CounterType {
+ case pdh.PERF_ELAPSED_TIME:
+ dv.Index(index).
+ Field(counter.FieldIndexValue).
+ SetFloat(float64((perfCounter.Value - pdh.WindowsEpoch) / perfObject.Frequency))
+ case pdh.PERF_100NSEC_TIMER, pdh.PERF_PRECISION_100NS_TIMER:
+ dv.Index(index).
+ Field(counter.FieldIndexValue).
+ SetFloat(float64(perfCounter.Value) * pdh.TicksToSecondScaleFactor)
+ default:
+ if counter.FieldIndexSecondValue != -1 {
+ dv.Index(index).
+ Field(counter.FieldIndexSecondValue).
+ SetFloat(float64(perfCounter.SecondValue))
+ }
+
+ if counter.FieldIndexValue != -1 {
+ dv.Index(index).
+ Field(counter.FieldIndexValue).
+ SetFloat(float64(perfCounter.Value))
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (c *Collector) Close() {}
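
The registry Collector is consumed through struct tags: each field of the destination element type names its counter in a perfdata tag, an optional Name field receives the instance name, and Collect fills a slice of that struct via reflection. A minimal sketch under those assumptions, using two real counters of the Memory object; memory and collectMemory are illustrative names, not part of this change, and the snippet assumes the registry package is imported:

    // memory maps two counters of the "Memory" perflib object onto struct fields.
    type memory struct {
        AvailableBytes float64 `perfdata:"Available Bytes"`
        CommittedBytes float64 `perfdata:"Committed Bytes"`
    }

    func collectMemory() ([]memory, error) {
        // The second argument (instances) is currently unused by this implementation.
        c, err := registry.NewCollector[memory]("Memory", nil)
        if err != nil {
            return nil, err
        }
        defer c.Close()

        var data []memory
        if err := c.Collect(&data); err != nil {
            return nil, err
        }

        return data, nil
    }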
diff --git a/internal/pdh/registry/nametable.go b/internal/pdh/registry/nametable.go
new file mode 100644
index 000000000..0040c9ecb
--- /dev/null
+++ b/internal/pdh/registry/nametable.go
@@ -0,0 +1,86 @@
+package registry
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "sync"
+)
+
+// CounterNameTable is the global English ("Counter 009") name table, initialized lazily on first use.
+// TODO: profile this and add an option to disable name tables if necessary.
+// It is not clear whether we should resolve the names at all or have the caller do it on demand
+// (for many use cases the index is sufficient).
+//
+//nolint:gochecknoglobals
+var CounterNameTable = *QueryNameTable("Counter 009")
+
+func (p *perfObjectType) LookupName() string {
+ return CounterNameTable.LookupString(p.ObjectNameTitleIndex)
+}
+
+type NameTable struct {
+ once sync.Once
+
+ name string
+
+ table struct {
+ index map[uint32]string
+ string map[string]uint32
+ }
+}
+
+func (t *NameTable) LookupString(index uint32) string {
+ t.initialize()
+
+ return t.table.index[index]
+}
+
+func (t *NameTable) LookupIndex(str string) uint32 {
+ t.initialize()
+
+ return t.table.string[str]
+}
+
+// QueryNameTable queries a perflib name table from the registry (v1 interface). Specify the table type and the
+// language code (e.g. "Counter 009" or "Help 009" for English).
+func QueryNameTable(tableName string) *NameTable {
+ return &NameTable{
+ name: tableName,
+ }
+}
+
+func (t *NameTable) initialize() {
+ t.once.Do(func() {
+ t.table.index = make(map[uint32]string)
+ t.table.string = make(map[string]uint32)
+
+ buffer, err := queryRawData(t.name)
+ if err != nil {
+ panic(err)
+ }
+
+ r := bytes.NewReader(buffer)
+
+ for {
+ index, err := readUTF16String(r)
+ if err != nil {
+ break
+ }
+
+ desc, err := readUTF16String(r)
+ if err != nil {
+ break
+ }
+
+			// An unparsable index means the name table is corrupt; treat it as fatal.
+			indexInt, err := strconv.Atoi(index)
+			if err != nil {
+				panic(fmt.Sprint("Invalid index ", index))
+			}
+
+ t.table.index[uint32(indexInt)] = desc
+ t.table.string[desc] = uint32(indexInt)
+ }
+ })
+}
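
The name table resolves in both directions, which is what MapCounterToIndex in utils.go and the object-name lookups in perflib.go rely on. A small sketch (lookupMemory is an illustrative helper, not part of this change):

    // lookupMemory resolves the "Memory" object to its index and back again.
    // Indices differ between systems, which is why names are resolved at runtime.
    func lookupMemory() (uint32, string) {
        index := CounterNameTable.LookupIndex("Memory")

        return index, CounterNameTable.LookupString(index) // "Memory"
    }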
diff --git a/internal/pdh/registry/perflib.go b/internal/pdh/registry/perflib.go
new file mode 100644
index 000000000..86a5f5165
--- /dev/null
+++ b/internal/pdh/registry/perflib.go
@@ -0,0 +1,493 @@
+package registry
+
+/*
+Go bindings for the HKEY_PERFORMANCE_DATA perflib / Performance Counters interface.
+
+# Overview
+
+HKEY_PERFORMANCE_DATA is a low-level alternative to the higher-level PDH library and WMI.
+It operates on blocks of counters and only returns raw values without calculating rates
+or formatting them, which is exactly what you want for, say, a Prometheus exporter
+(not so much for a GUI like Windows Performance Monitor).
+
+Its overhead is much lower than the high-level libraries.
+
+It operates on the same set of perflib providers as PDH and WMI. See this document
+for more details on the relationship between the different libraries:
+https://msdn.microsoft.com/en-us/library/windows/desktop/aa371643(v=vs.85).aspx
+
+Example C++ source code:
+https://msdn.microsoft.com/de-de/library/windows/desktop/aa372138(v=vs.85).aspx
+
+For now, the API is not stable and is probably going to change in future
+perflib_exporter releases. If you want to use this library, send the author an email
+so we can discuss your requirements and stabilize the API.
+
+# Names
+
+Counter names and help texts are resolved by looking up an index in a name table.
+Since Microsoft loves internationalization, both names and help texts can be requested
+in any locally available language.
+
+The library automatically loads the name tables and resolves all identifiers
+in English ("Name" and "HelpText" struct members). You can manually resolve
+identifiers in a different language by using the NameTable API.
+
+# Performance Counters intro
+
+Windows has a system-wide performance counter mechanism. Most performance counters
+are stored as actual counters, not gauges (with some exceptions).
+There's additional metadata which defines how the counter should be presented to the user
+(for example, as a calculated rate). This library disregards all of the display metadata.
+
+At the top level, there are a number of performance counter objects.
+Each object has counter definitions, which contain the metadata for a particular
+counter, and zero or more instances. We hide the fact that there are
+objects with no instances, and simply return a single null instance.
+
+There's one counter per counter definition and instance (or the object itself, if
+there are no instances).
+
+Behind the scenes, every perflib DLL provides one or more objects.
+Perflib has a v1 registry interface where DLLs are dynamically registered and
+unregistered. Some third-party applications like VMware provide their own counters,
+but this is, sadly, a rare occurrence.
+
+Different Windows releases have different numbers of counters.
+
+Objects and counters are identified by well-known indices.
+
+Here's an example object with one instance:
+
+ 4320 WSMan Quota Statistics [7 counters, 1 instance(s)]
+ `-- "WinRMService"
+ `-- Total Requests/Second [4322] = 59
+ `-- User Quota Violations/Second [4324] = 0
+ `-- System Quota Violations/Second [4326] = 0
+ `-- Active Shells [4328] = 0
+ `-- Active Operations [4330] = 0
+ `-- Active Users [4332] = 0
+ `-- Process ID [4334] = 928
+
+All "per second" metrics are counters, the rest are gauges.
+
+Another example, with no instance:
+
+ 4600 Network QoS Policy [6 counters, 1 instance(s)]
+ `-- (default)
+ `-- Packets transmitted [4602] = 1744
+ `-- Packets transmitted/sec [4604] = 4852
+ `-- Bytes transmitted [4606] = 4853
+ `-- Bytes transmitted/sec [4608] = 180388626632
+ `-- Packets dropped [4610] = 0
+ `-- Packets dropped/sec [4612] = 0
+
+You can access the same values using PowerShell's Get-Counter cmdlet
+or the Performance Monitor.
+
+ > Get-Counter '\WSMan Quota Statistics(WinRMService)\Process ID'
+
+ Timestamp CounterSamples
+ --------- --------------
+ 1/28/2018 10:18:00 PM \\DEV\wsman quota statistics(winrmservice)\process id :
+ 928
+
+ > (Get-Counter '\Process(Idle)\% Processor Time').CounterSamples[0] | Format-List *
+ [..detailed output...]
+
+Data for some of the objects is also available through WMI:
+
+ > Get-CimInstance Win32_PerfRawData_Counters_WSManQuotaStatistics
+
+ Name : WinRMService
+ [...]
+ ActiveOperations : 0
+ ActiveShells : 0
+ ActiveUsers : 0
+ ProcessID : 928
+ SystemQuotaViolationsPerSecond : 0
+ TotalRequestsPerSecond : 59
+ UserQuotaViolationsPerSecond : 0
+*/
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+ "unsafe"
+
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
+ "golang.org/x/sys/windows"
+)
+
+// There's a LittleEndian field in the PERF header - we ought to check it.
+//
+//nolint:gochecknoglobals
+var bo = binary.LittleEndian
+
+// PerfObject Top-level performance object (like "Process").
+type PerfObject struct {
+ Name string
+ // NameIndex Same index you pass to QueryPerformanceData
+ NameIndex uint
+ Instances []*PerfInstance
+ CounterDefs []*PerfCounterDef
+
+ Frequency int64
+
+ rawData *perfObjectType
+}
+
+// PerfInstance Each object can have multiple instances, for example one per process or per logical disk.
+// If the object has no instances, we return a single PerfInstance with an empty name.
+type PerfInstance struct {
+ // *not* resolved using a name table
+ Name string
+ Counters []*PerfCounter
+
+ rawData *perfInstanceDefinition
+ rawCounterBlock *perfCounterBlock
+}
+
+type PerfCounterDef struct {
+ Name string
+ NameIndex uint
+
+ // For debugging - subject to removal. CounterType is a perflib
+ // implementation detail (see perflib.h) and should not be used outside
+ // of this package. We export it so we can show it on /dump.
+ CounterType uint32
+
+ // PERF_TYPE_COUNTER (otherwise, it's a gauge)
+ IsCounter bool
+ // PERF_COUNTER_BASE (base value of a multi-value fraction)
+ IsBaseValue bool
+ // PERF_TIMER_100NS
+ IsNanosecondCounter bool
+ HasSecondValue bool
+
+ rawData *perfCounterDefinition
+}
+
+type PerfCounter struct {
+ Value int64
+ Def *PerfCounterDef
+ SecondValue int64
+}
+
+//nolint:gochecknoglobals
+var (
+ bufLenGlobal = uint32(400000)
+ bufLenCostly = uint32(2000000)
+)
+
+// queryRawData Queries the performance counter buffer using RegQueryValueEx, returning raw bytes. See:
+// https://msdn.microsoft.com/de-de/library/windows/desktop/aa373219(v=vs.85).aspx
+func queryRawData(query string) ([]byte, error) {
+ var (
+ valType uint32
+ buffer []byte
+ bufLen uint32
+ )
+
+ switch query {
+ case "Global":
+ bufLen = bufLenGlobal
+ case "Costly":
+ bufLen = bufLenCostly
+ default:
+		// The required size depends on the number of values requested,
+		// so we need to make an educated guess.
+ numCounters := len(strings.Split(query, " "))
+ bufLen = uint32(150000 * numCounters)
+ }
+
+ buffer = make([]byte, bufLen)
+
+ name, err := windows.UTF16PtrFromString(query)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode query string: %w", err)
+ }
+
+ for {
+ bufLen := uint32(len(buffer))
+
+ err := windows.RegQueryValueEx(
+ windows.HKEY_PERFORMANCE_DATA,
+ name,
+ nil,
+ &valType,
+ (*byte)(unsafe.Pointer(&buffer[0])),
+ &bufLen)
+
+ switch {
+ case errors.Is(err, error(windows.ERROR_MORE_DATA)):
+ newBuffer := make([]byte, len(buffer)+16384)
+ copy(newBuffer, buffer)
+ buffer = newBuffer
+
+ continue
+ case errors.Is(err, error(windows.ERROR_BUSY)):
+ time.Sleep(50 * time.Millisecond)
+
+ continue
+ case err != nil:
+ var errNo windows.Errno
+ if errors.As(err, &errNo) {
+				return nil, fmt.Errorf("RegQueryValueEx failed: %w errno %d", err, uint(errNo))
+ }
+
+ return nil, err
+ }
+
+ buffer = buffer[:bufLen]
+
+ switch query {
+ case "Global":
+ if bufLen > bufLenGlobal {
+ bufLenGlobal = bufLen
+ }
+ case "Costly":
+ if bufLen > bufLenCostly {
+ bufLenCostly = bufLen
+ }
+ }
+
+ return buffer, nil
+ }
+}
+
+/*
+QueryPerformanceData Query all performance counters that match a given query.
+
+The query can be any of the following:
+
+- "Global" (all performance counters except those Windows marked as costly)
+
+- "Costly" (only the costly ones)
+
+- One or more object indices, separated by spaces ("238 2 5")
+
+Many objects have dependencies - if you query one of them, you often get back
+more than you asked for.
+*/
+func QueryPerformanceData(query string, counterName string) ([]*PerfObject, error) {
+ buffer, err := queryRawData(query)
+ if err != nil {
+ return nil, err
+ }
+
+ r := bytes.NewReader(buffer)
+
+ // Read global header
+
+ header := new(perfDataBlock)
+
+ err = header.BinaryReadFrom(r)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read performance data block for %q with: %w", query, err)
+ }
+
+ // Check for "PERF" signature
+ if header.Signature != [4]uint16{80, 69, 82, 70} {
+ panic("Invalid performance block header")
+ }
+
+ // Parse the performance data
+
+ numObjects := int(header.NumObjectTypes)
+ numFilteredObjects := 0
+
+ objects := make([]*PerfObject, numObjects)
+
+ objOffset := int64(header.HeaderLength)
+
+ for i := range numObjects {
+ _, err := r.Seek(objOffset, io.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+
+ obj := new(perfObjectType)
+
+ err = obj.BinaryReadFrom(r)
+ if err != nil {
+ return nil, err
+ }
+
+ perfCounterName := obj.LookupName()
+
+ if counterName != "" && perfCounterName != counterName {
+ objOffset += int64(obj.TotalByteLength)
+
+ continue
+ }
+
+ numCounterDefs := int(obj.NumCounters)
+ numInstances := int(obj.NumInstances)
+
+ // Perf objects can have no instances. The perflib differentiates
+ // between objects with instances and without, but we just create
+ // an empty instance in order to simplify the interface.
+ if numInstances <= 0 {
+ numInstances = 1
+ }
+
+ instances := make([]*PerfInstance, numInstances)
+ counterDefs := make([]*PerfCounterDef, numCounterDefs)
+
+ objects[i] = &PerfObject{
+ Name: perfCounterName,
+ NameIndex: uint(obj.ObjectNameTitleIndex),
+ Instances: instances,
+ CounterDefs: counterDefs,
+ Frequency: obj.PerfFreq,
+ rawData: obj,
+ }
+
+ for i := range numCounterDefs {
+ def := new(perfCounterDefinition)
+
+ err := def.BinaryReadFrom(r)
+ if err != nil {
+ return nil, err
+ }
+
+ counterDefs[i] = &PerfCounterDef{
+ Name: def.LookupName(),
+ NameIndex: uint(def.CounterNameTitleIndex),
+ rawData: def,
+
+ CounterType: def.CounterType,
+
+ IsCounter: def.CounterType&0x400 == 0x400,
+ IsBaseValue: def.CounterType&0x00030000 == 0x00030000,
+ IsNanosecondCounter: def.CounterType&0x00100000 == 0x00100000,
+ HasSecondValue: def.CounterType == pdh.PERF_AVERAGE_BULK,
+ }
+ }
+
+ if obj.NumInstances <= 0 { //nolint:nestif
+ blockOffset := objOffset + int64(obj.DefinitionLength)
+
+ if _, err := r.Seek(blockOffset, io.SeekStart); err != nil {
+ return nil, err
+ }
+
+ _, counters, err := parseCounterBlock(buffer, r, blockOffset, counterDefs)
+ if err != nil {
+ return nil, err
+ }
+
+ instances[0] = &PerfInstance{
+ Name: "",
+ Counters: counters,
+ rawData: nil,
+ rawCounterBlock: nil,
+ }
+ } else {
+ instOffset := objOffset + int64(obj.DefinitionLength)
+
+ for i := range numInstances {
+ if _, err := r.Seek(instOffset, io.SeekStart); err != nil {
+ return nil, err
+ }
+
+ inst := new(perfInstanceDefinition)
+
+ if err = inst.BinaryReadFrom(r); err != nil {
+ return nil, err
+ }
+
+ name, _ := readUTF16StringAtPos(r, instOffset+int64(inst.NameOffset), inst.NameLength)
+ pos := instOffset + int64(inst.ByteLength)
+
+ offset, counters, err := parseCounterBlock(buffer, r, pos, counterDefs)
+ if err != nil {
+ return nil, err
+ }
+
+ instances[i] = &PerfInstance{
+ Name: name,
+ Counters: counters,
+ rawData: inst,
+ }
+
+ instOffset = pos + offset
+ }
+ }
+
+ if counterName != "" {
+ return objects[i : i+1], nil
+ }
+
+ // Next perfObjectType
+ objOffset += int64(obj.TotalByteLength)
+ numFilteredObjects++
+ }
+
+ return objects[:numFilteredObjects], nil
+}
+
+func parseCounterBlock(b []byte, r io.ReadSeeker, pos int64, defs []*PerfCounterDef) (int64, []*PerfCounter, error) {
+ _, err := r.Seek(pos, io.SeekStart)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ block := new(perfCounterBlock)
+
+ err = block.BinaryReadFrom(r)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ counters := make([]*PerfCounter, len(defs))
+
+ for i, def := range defs {
+ valueOffset := pos + int64(def.rawData.CounterOffset)
+ value := convertCounterValue(def.rawData, b, valueOffset)
+ secondValue := int64(0)
+
+ if def.HasSecondValue {
+ secondValue = convertCounterValue(def.rawData, b, valueOffset+8)
+ }
+
+ counters[i] = &PerfCounter{
+ Value: value,
+ Def: def,
+ SecondValue: secondValue,
+ }
+ }
+
+ return int64(block.ByteLength), counters, nil
+}
+
+func convertCounterValue(counterDef *perfCounterDefinition, buffer []byte, valueOffset int64) int64 {
+ /*
+ We can safely ignore the type since we're not interested in anything except the raw value.
+ We also ignore all of the other attributes (timestamp, presentation, multi counter values...)
+
+ See also: winperf.h.
+
+		Here are the most common values for CounterType:
+
+ 65536 32bit counter
+ 65792 64bit counter
+ 272696320 32bit rate
+ 272696576 64bit rate
+
+ */
+ switch counterDef.CounterSize {
+ case 4:
+ return int64(bo.Uint32(buffer[valueOffset:(valueOffset + 4)]))
+ case 8:
+ return int64(bo.Uint64(buffer[valueOffset:(valueOffset + 8)]))
+ default:
+ return int64(bo.Uint32(buffer[valueOffset:(valueOffset + 4)]))
+ }
+}
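
QueryPerformanceData returns the object → instance → counter tree sketched in the package comment; callers walk it and read the raw Value (and, for PERF_AVERAGE_BULK counters, SecondValue) fields. A minimal sketch from inside this package, mirroring how collector.go drives it; dumpObject is an illustrative helper, not part of this change, and additionally needs the fmt import:

    // dumpObject prints every raw counter value of a single perflib object.
    // The query is the object's numeric index resolved from the name table,
    // and the second argument filters the result down to that object.
    func dumpObject(object string) error {
        objects, err := QueryPerformanceData(MapCounterToIndex(object), object)
        if err != nil {
            return err
        }

        for _, obj := range objects {
            for _, inst := range obj.Instances {
                for _, ctr := range inst.Counters {
                    fmt.Printf("%s(%s) %s = %d\n", obj.Name, inst.Name, ctr.Def.Name, ctr.Value)
                }
            }
        }

        return nil
    }

For example, dumpObject("Memory") produces one line per counter of the single, empty-named Memory instance.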
diff --git a/internal/pdh/registry/perflib_test.go b/internal/pdh/registry/perflib_test.go
new file mode 100644
index 000000000..b87981813
--- /dev/null
+++ b/internal/pdh/registry/perflib_test.go
@@ -0,0 +1,11 @@
+package registry
+
+import (
+ "testing"
+)
+
+func BenchmarkQueryPerformanceData(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ _, _ = QueryPerformanceData("Global", "")
+ }
+}
diff --git a/internal/pdh/registry/raw_types.go b/internal/pdh/registry/raw_types.go
new file mode 100644
index 000000000..f23927ea8
--- /dev/null
+++ b/internal/pdh/registry/raw_types.go
@@ -0,0 +1,173 @@
+package registry
+
+import (
+ "encoding/binary"
+ "io"
+
+ "golang.org/x/sys/windows"
+)
+
+/*
+perfDataBlock
+See: https://msdn.microsoft.com/de-de/library/windows/desktop/aa373157(v=vs.85).aspx
+
+ typedef struct _PERF_DATA_BLOCK {
+ WCHAR Signature[4];
+ DWORD LittleEndian;
+ DWORD Version;
+ DWORD Revision;
+ DWORD TotalByteLength;
+ DWORD HeaderLength;
+ DWORD NumObjectTypes;
+ DWORD DefaultObject;
+ SYSTEMTIME SystemTime;
+ LARGE_INTEGER PerfTime;
+ LARGE_INTEGER PerfFreq;
+ LARGE_INTEGER PerfTime100nSec;
+ DWORD SystemNameLength;
+ DWORD SystemNameOffset;
+ } PERF_DATA_BLOCK;
+*/
+type perfDataBlock struct {
+ Signature [4]uint16
+ LittleEndian uint32
+ Version uint32
+ Revision uint32
+ TotalByteLength uint32
+ HeaderLength uint32
+ NumObjectTypes uint32
+ DefaultObject int32
+ SystemTime windows.Systemtime
+ _ uint32 // unknown field
+ PerfTime int64
+ PerfFreq int64
+ PerfTime100nSec int64
+ SystemNameLength uint32
+ SystemNameOffset uint32
+}
+
+func (p *perfDataBlock) BinaryReadFrom(r io.Reader) error {
+ return binary.Read(r, bo, p)
+}
+
+/*
+perfObjectType
+See: https://msdn.microsoft.com/en-us/library/windows/desktop/aa373160(v=vs.85).aspx
+
+ typedef struct _PERF_OBJECT_TYPE {
+ DWORD TotalByteLength;
+ DWORD DefinitionLength;
+ DWORD HeaderLength;
+ DWORD ObjectNameTitleIndex;
+ LPWSTR ObjectNameTitle;
+ DWORD ObjectHelpTitleIndex;
+ LPWSTR ObjectHelpTitle;
+ DWORD DetailLevel;
+ DWORD NumCounters;
+ DWORD DefaultCounter;
+ DWORD NumInstances;
+ DWORD CodePage;
+ LARGE_INTEGER PerfTime;
+ LARGE_INTEGER PerfFreq;
+ } PERF_OBJECT_TYPE;
+*/
+type perfObjectType struct {
+ TotalByteLength uint32
+ DefinitionLength uint32
+ HeaderLength uint32
+ ObjectNameTitleIndex uint32
+ ObjectNameTitle uint32
+ ObjectHelpTitleIndex uint32
+ ObjectHelpTitle uint32
+ DetailLevel uint32
+ NumCounters uint32
+ DefaultCounter int32
+ NumInstances int32
+ CodePage uint32
+ PerfTime int64
+ PerfFreq int64
+}
+
+func (p *perfObjectType) BinaryReadFrom(r io.Reader) error {
+ return binary.Read(r, bo, p)
+}
+
+/*
+perfCounterDefinition
+See: https://msdn.microsoft.com/en-us/library/windows/desktop/aa373150(v=vs.85).aspx
+
+ typedef struct _PERF_COUNTER_DEFINITION {
+ DWORD ByteLength;
+ DWORD CounterNameTitleIndex;
+ LPWSTR CounterNameTitle;
+ DWORD CounterHelpTitleIndex;
+ LPWSTR CounterHelpTitle;
+ LONG DefaultScale;
+ DWORD DetailLevel;
+ DWORD CounterType;
+ DWORD CounterSize;
+ DWORD CounterOffset;
+ } PERF_COUNTER_DEFINITION;
+*/
+type perfCounterDefinition struct {
+ ByteLength uint32
+ CounterNameTitleIndex uint32
+ CounterNameTitle uint32
+ CounterHelpTitleIndex uint32
+ CounterHelpTitle uint32
+ DefaultScale int32
+ DetailLevel uint32
+ CounterType uint32
+ CounterSize uint32
+ CounterOffset uint32
+}
+
+func (p *perfCounterDefinition) BinaryReadFrom(r io.Reader) error {
+ return binary.Read(r, bo, p)
+}
+
+func (p *perfCounterDefinition) LookupName() string {
+ return CounterNameTable.LookupString(p.CounterNameTitleIndex)
+}
+
+/*
+perfCounterBlock
+See: https://msdn.microsoft.com/en-us/library/windows/desktop/aa373147(v=vs.85).aspx
+
+ typedef struct _PERF_COUNTER_BLOCK {
+ DWORD ByteLength;
+ } PERF_COUNTER_BLOCK;
+*/
+type perfCounterBlock struct {
+ ByteLength uint32
+}
+
+func (p *perfCounterBlock) BinaryReadFrom(r io.Reader) error {
+ return binary.Read(r, bo, p)
+}
+
+/*
+perfInstanceDefinition
+See: https://msdn.microsoft.com/en-us/library/windows/desktop/aa373159(v=vs.85).aspx
+
+ typedef struct _PERF_INSTANCE_DEFINITION {
+ DWORD ByteLength;
+ DWORD ParentObjectTitleIndex;
+ DWORD ParentObjectInstance;
+ DWORD UniqueID;
+ DWORD NameOffset;
+ DWORD NameLength;
+ } PERF_INSTANCE_DEFINITION;
+*/
+type perfInstanceDefinition struct {
+ ByteLength uint32
+ ParentObjectTitleIndex uint32
+ ParentObjectInstance uint32
+ UniqueID uint32
+ NameOffset uint32
+ NameLength uint32
+}
+
+func (p *perfInstanceDefinition) BinaryReadFrom(r io.Reader) error {
+ return binary.Read(r, bo, p)
+}
diff --git a/internal/pdh/registry/utf16.go b/internal/pdh/registry/utf16.go
new file mode 100644
index 000000000..77b5e4609
--- /dev/null
+++ b/internal/pdh/registry/utf16.go
@@ -0,0 +1,49 @@
+package registry
+
+import (
+ "encoding/binary"
+ "io"
+
+ "golang.org/x/sys/windows"
+)
+
+// readUTF16StringAtPos Read an unterminated UTF16 string at a given position, specifying its length.
+func readUTF16StringAtPos(r io.ReadSeeker, absPos int64, length uint32) (string, error) {
+ value := make([]uint16, length/2)
+
+ _, err := r.Seek(absPos, io.SeekStart)
+ if err != nil {
+ return "", err
+ }
+
+ err = binary.Read(r, bo, value)
+ if err != nil {
+ return "", err
+ }
+
+ return windows.UTF16ToString(value), nil
+}
+
+// readUTF16String Reads a null-terminated UTF16 string at the current offset.
+func readUTF16String(r io.Reader) (string, error) {
+ var err error
+
+ b := make([]byte, 2)
+ out := make([]uint16, 0, 100)
+
+ for i := 0; err == nil; i += 2 {
+ _, err = r.Read(b)
+
+ if b[0] == 0 && b[1] == 0 {
+ break
+ }
+
+ out = append(out, bo.Uint16(b))
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ return windows.UTF16ToString(out), nil
+}
diff --git a/internal/pdh/registry/utils.go b/internal/pdh/registry/utils.go
new file mode 100644
index 000000000..ad7b31d07
--- /dev/null
+++ b/internal/pdh/registry/utils.go
@@ -0,0 +1,9 @@
+package registry
+
+import (
+ "strconv"
+)
+
+func MapCounterToIndex(name string) string {
+ return strconv.Itoa(int(CounterNameTable.LookupIndex(name)))
+}
diff --git a/internal/perfdata/pdh_amd64.go b/internal/pdh/types.go
similarity index 55%
rename from internal/perfdata/pdh_amd64.go
rename to internal/pdh/types.go
index 02ce5ec00..5e6efdcbd 100644
--- a/internal/perfdata/pdh_amd64.go
+++ b/internal/pdh/types.go
@@ -1,79 +1,75 @@
-// Copyright (c) 2010-2024 The win Authors. All rights reserved.
-// Copyright (c) 2024 The prometheus-community Authors. All rights reserved.
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
+// http://www.apache.org/licenses/LICENSE-2.0
//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// This is the official list of 'win' authors for copyright purposes.
-//
-// Alexander Neumann
-// Joseph Watson
-// Kevin Pors
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
//go:build windows
-package perfdata
+package pdh
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "golang.org/x/sys/windows"
+)
-import "golang.org/x/sys/windows"
+const (
+ InstanceEmpty = "------"
+ InstanceTotal = "_Total"
+)
+
+type CounterValue struct {
+ Type prometheus.ValueType
+ FirstValue float64
+ SecondValue float64
+}
-// PdhFmtCountervalueDouble is a union specialization for double values.
-type PdhFmtCountervalueDouble struct {
+// FmtCounterValueDouble is a union specialization for double values.
+type FmtCounterValueDouble struct {
CStatus uint32
DoubleValue float64
}
-// PdhFmtCounterValueLarge is a union specialization for 64-bit integer values.
-type PdhFmtCounterValueLarge struct {
+// FmtCounterValueLarge is a union specialization for 64-bit integer values.
+type FmtCounterValueLarge struct {
CStatus uint32
LargeValue int64
}
-// PdhFmtCounterValueLong is a union specialization for long values.
-type PdhFmtCounterValueLong struct {
+// FmtCounterValueLong is a union specialization for long values.
+type FmtCounterValueLong struct {
CStatus uint32
LongValue int32
padding [4]byte //nolint:unused // Memory reservation
}
-// PdhFmtCountervalueItemDouble is a union specialization for double values, used by PdhGetFormattedCounterArrayDouble.
-type PdhFmtCountervalueItemDouble struct {
+// FmtCounterValueItemDouble is a union specialization for double values, used by GetFormattedCounterArrayDouble.
+type FmtCounterValueItemDouble struct {
SzName *uint16
- FmtValue PdhFmtCountervalueDouble
+ FmtValue FmtCounterValueDouble
}
-// PdhFmtCounterValueItemLarge is a union specialization for 'large' values, used by PdhGetFormattedCounterArrayLarge().
-type PdhFmtCounterValueItemLarge struct {
+// FmtCounterValueItemLarge is a union specialization for 'large' values, used by PdhGetFormattedCounterArrayLarge().
+type FmtCounterValueItemLarge struct {
SzName *uint16 // pointer to a string
- FmtValue PdhFmtCounterValueLarge
+ FmtValue FmtCounterValueLarge
}
-// PdhFmtCounterValueItemLong is a union specialization for long values, used by PdhGetFormattedCounterArrayLong().
-type PdhFmtCounterValueItemLong struct {
+// FmtCounterValueItemLong is a union specialization for long values, used by PdhGetFormattedCounterArrayLong().
+type FmtCounterValueItemLong struct {
SzName *uint16 // pointer to a string
- FmtValue PdhFmtCounterValueLong
+ FmtValue FmtCounterValueLong
}
-// PdhCounterInfo structure contains information describing the properties of a counter. This information also includes the counter path.
-type PdhCounterInfo struct {
+// CounterInfo structure contains information describing the properties of a counter. This information also includes the counter path.
+type CounterInfo struct {
// Size of the structure, including the appended strings, in bytes.
DwLength uint32
// Counter type. For a list of counter types,
@@ -91,9 +87,9 @@ type PdhCounterInfo struct {
LScale int32
// Default scale factor as suggested by the counter's provider.
LDefaultScale int32
- // The value passed in the dwUserData parameter when calling PdhAddCounter.
+ // The value passed in the dwUserData parameter when calling AddCounter.
DwUserData *uint32
- // The value passed in the dwUserData parameter when calling PdhOpenQuery.
+ // The value passed in the dwUserData parameter when calling OpenQuery.
DwQueryUserData *uint32
// Null-terminated string that specifies the full counter path. The string follows this structure in memory.
SzFullPath *uint16 // pointer to a string
@@ -118,9 +114,9 @@ type PdhCounterInfo struct {
DataBuffer [1]uint32 // pointer to an extra space
}
-// The PdhRawCounter structure returns the data as it was collected from the counter provider.
+// The RawCounter structure returns the data as it was collected from the counter provider.
// No translation, formatting, or other interpretation is performed on the data.
-type PdhRawCounter struct {
+type RawCounter struct {
// Counter status that indicates if the counter value is valid. Check this member before using the data in a calculation or displaying its value.
// For a list of possible values, see https://docs.microsoft.com/windows/desktop/PerfCtrs/checking-pdh-interface-return-values
CStatus uint32
@@ -135,9 +131,9 @@ type PdhRawCounter struct {
MultiCount uint32
}
-type PdhRawCounterItem struct {
+type RawCounterItem struct {
// Pointer to a null-terminated string that specifies the instance name of the counter. The string is appended to the end of this structure.
SzName *uint16
- // A PdhRawCounter structure that contains the raw counter value of the instance
- RawValue PdhRawCounter
+ // A RawCounter structure that contains the raw counter value of the instance
+ RawValue RawCounter
}
diff --git a/internal/pdh/types/types.go b/internal/pdh/types/types.go
new file mode 100644
index 000000000..1f2eff347
--- /dev/null
+++ b/internal/pdh/types/types.go
@@ -0,0 +1,6 @@
+package types
+
+type Collector interface {
+ Collect(dst any) error
+ Close()
+}
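
The Collector interface abstracts over implementations that expose Collect(dst any) error and Close(); the registry Collector above satisfies it. A hedged sketch of a consumer written against it only, so either backing implementation can be injected (collectInto is an illustrative helper, not part of this change):

    // collectInto fills a slice of T from whichever Collector implementation is injected.
    func collectInto[T any](c types.Collector) ([]T, error) {
        var data []T
        if err := c.Collect(&data); err != nil {
            return nil, err
        }

        return data, nil
    }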
diff --git a/internal/perfdata/collector.go b/internal/perfdata/collector.go
deleted file mode 100644
index c949b0e92..000000000
--- a/internal/perfdata/collector.go
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package perfdata
-
-import (
- "errors"
- "fmt"
- "slices"
- "strings"
- "sync"
- "unsafe"
-
- "github.com/prometheus/client_golang/prometheus"
- "golang.org/x/sys/windows"
-)
-
-//nolint:gochecknoglobals
-var (
- InstancesAll = []string{"*"}
- InstancesTotal = []string{InstanceTotal}
-)
-
-type CounterValues = map[string]map[string]CounterValue
-
-type Collector struct {
- object string
- counters map[string]Counter
- handle pdhQueryHandle
- totalCounterRequested bool
- mu sync.RWMutex
-
- collectCh chan struct{}
- counterValuesCh chan CounterValues
- errorCh chan error
-}
-
-type Counter struct {
- Name string
- Desc string
- Instances map[string]pdhCounterHandle
- Type uint32
- Frequency int64
-}
-
-func NewCollector(object string, instances []string, counters []string) (*Collector, error) {
- var handle pdhQueryHandle
-
- if ret := PdhOpenQuery(0, 0, &handle); ret != ErrorSuccess {
- return nil, NewPdhError(ret)
- }
-
- if len(instances) == 0 {
- instances = []string{InstanceEmpty}
- }
-
- collector := &Collector{
- object: object,
- counters: make(map[string]Counter, len(counters)),
- handle: handle,
- totalCounterRequested: slices.Contains(instances, InstanceTotal),
- mu: sync.RWMutex{},
- }
-
- errs := make([]error, 0, len(counters))
-
- for _, counterName := range counters {
- if counterName == "*" {
- return nil, errors.New("wildcard counters are not supported")
- }
-
- counter := Counter{
- Name: counterName,
- Instances: make(map[string]pdhCounterHandle, len(instances)),
- }
-
- var counterPath string
-
- for _, instance := range instances {
- counterPath = formatCounterPath(object, instance, counterName)
-
- var counterHandle pdhCounterHandle
-
- if ret := PdhAddEnglishCounter(handle, counterPath, 0, &counterHandle); ret != ErrorSuccess {
- errs = append(errs, fmt.Errorf("failed to add counter %s: %w", counterPath, NewPdhError(ret)))
-
- continue
- }
-
- counter.Instances[instance] = counterHandle
-
- if counter.Type != 0 {
- continue
- }
-
- // Get the info with the current buffer size
- bufLen := uint32(0)
-
- if ret := PdhGetCounterInfo(counterHandle, 0, &bufLen, nil); ret != PdhMoreData {
- errs = append(errs, fmt.Errorf("PdhGetCounterInfo: %w", NewPdhError(ret)))
-
- continue
- }
-
- buf := make([]byte, bufLen)
- if ret := PdhGetCounterInfo(counterHandle, 0, &bufLen, &buf[0]); ret != ErrorSuccess {
- errs = append(errs, fmt.Errorf("PdhGetCounterInfo: %w", NewPdhError(ret)))
-
- continue
- }
-
- ci := (*PdhCounterInfo)(unsafe.Pointer(&buf[0]))
- counter.Type = ci.DwType
- counter.Desc = windows.UTF16PtrToString(ci.SzExplainText)
-
- if counter.Type == PERF_ELAPSED_TIME {
- if ret := PdhGetCounterTimeBase(counterHandle, &counter.Frequency); ret != ErrorSuccess {
- errs = append(errs, fmt.Errorf("PdhGetCounterTimeBase: %w", NewPdhError(ret)))
-
- continue
- }
- }
- }
-
- collector.counters[counterName] = counter
- }
-
- if err := errors.Join(errs...); err != nil {
- return collector, fmt.Errorf("failed to initialize collector: %w", err)
- }
-
- if len(collector.counters) == 0 {
- return nil, errors.New("no counters configured")
- }
-
- collector.collectCh = make(chan struct{})
- collector.counterValuesCh = make(chan CounterValues)
- collector.errorCh = make(chan error)
-
- go collector.collectRoutine()
-
- if _, err := collector.Collect(); err != nil && !errors.Is(err, ErrNoData) {
- return collector, fmt.Errorf("failed to collect initial data: %w", err)
- }
-
- return collector, nil
-}
-
-func (c *Collector) Describe() map[string]string {
- if c == nil {
- return map[string]string{}
- }
-
- c.mu.RLock()
- defer c.mu.RUnlock()
-
- desc := make(map[string]string, len(c.counters))
-
- for _, counter := range c.counters {
- desc[counter.Name] = counter.Desc
- }
-
- return desc
-}
-
-func (c *Collector) Collect() (CounterValues, error) {
- if c == nil {
- return CounterValues{}, ErrPerformanceCounterNotInitialized
- }
-
- c.mu.RLock()
- defer c.mu.RUnlock()
-
- if len(c.counters) == 0 || c.handle == 0 || c.collectCh == nil || c.counterValuesCh == nil || c.errorCh == nil {
- return nil, ErrPerformanceCounterNotInitialized
- }
-
- c.collectCh <- struct{}{}
-
- return <-c.counterValuesCh, <-c.errorCh
-}
-
-func (c *Collector) collectRoutine() {
- var (
- itemCount uint32
- bytesNeeded uint32
- )
-
- buf := make([]byte, 1)
-
- for range c.collectCh {
- if ret := PdhCollectQueryData(c.handle); ret != ErrorSuccess {
- c.counterValuesCh <- nil
- c.errorCh <- fmt.Errorf("failed to collect query data: %w", NewPdhError(ret))
-
- continue
- }
-
- counterValues, err := (func() (CounterValues, error) {
- var data CounterValues
-
- for _, counter := range c.counters {
- for _, instance := range counter.Instances {
- // Get the info with the current buffer size
- bytesNeeded = uint32(cap(buf))
-
- for {
- ret := PdhGetRawCounterArray(instance, &bytesNeeded, &itemCount, &buf[0])
-
- if ret == ErrorSuccess {
- break
- }
-
- if err := NewPdhError(ret); ret != PdhMoreData && !isKnownCounterDataError(err) {
- return nil, fmt.Errorf("PdhGetRawCounterArray: %w", err)
- }
-
- if bytesNeeded <= uint32(cap(buf)) {
- return nil, fmt.Errorf("PdhGetRawCounterArray reports buffer too small (%d), but buffer is large enough (%d): %w", uint32(cap(buf)), bytesNeeded, NewPdhError(ret))
- }
-
- buf = make([]byte, bytesNeeded)
- }
-
- items := unsafe.Slice((*PdhRawCounterItem)(unsafe.Pointer(&buf[0])), itemCount)
-
- if data == nil {
- data = make(CounterValues, itemCount)
- }
-
- var metricType prometheus.ValueType
- if val, ok := supportedCounterTypes[counter.Type]; ok {
- metricType = val
- } else {
- metricType = prometheus.GaugeValue
- }
-
- for _, item := range items {
- if item.RawValue.CStatus == PdhCstatusValidData || item.RawValue.CStatus == PdhCstatusNewData {
- instanceName := windows.UTF16PtrToString(item.SzName)
- if strings.HasSuffix(instanceName, InstanceTotal) && !c.totalCounterRequested {
- continue
- }
-
- if instanceName == "" || instanceName == "*" {
- instanceName = InstanceEmpty
- }
-
- if _, ok := data[instanceName]; !ok {
- data[instanceName] = make(map[string]CounterValue, len(c.counters))
- }
-
- values := CounterValue{
- Type: metricType,
- }
-
- // This is a workaround for the issue with the elapsed time counter type.
- // Source: https://github.com/prometheus-community/windows_exporter/pull/335/files#diff-d5d2528f559ba2648c2866aec34b1eaa5c094dedb52bd0ff22aa5eb83226bd8dR76-R83
- // Ref: https://learn.microsoft.com/en-us/windows/win32/perfctrs/calculating-counter-values
-
- switch counter.Type {
- case PERF_ELAPSED_TIME:
- values.FirstValue = float64((item.RawValue.FirstValue - WindowsEpoch) / counter.Frequency)
- case PERF_100NSEC_TIMER, PERF_PRECISION_100NS_TIMER:
- values.FirstValue = float64(item.RawValue.FirstValue) * TicksToSecondScaleFactor
- case PERF_AVERAGE_BULK, PERF_RAW_FRACTION:
- values.FirstValue = float64(item.RawValue.FirstValue)
- values.SecondValue = float64(item.RawValue.SecondValue)
- default:
- values.FirstValue = float64(item.RawValue.FirstValue)
- }
-
- data[instanceName][counter.Name] = values
- }
- }
- }
- }
-
- return data, nil
- })()
-
- if err == nil && len(counterValues) == 0 {
- err = ErrNoData
- }
-
- c.counterValuesCh <- counterValues
- c.errorCh <- err
- }
-}
-
-func (c *Collector) Close() {
- if c == nil {
- return
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- PdhCloseQuery(c.handle)
-
- c.handle = 0
-
- if c.collectCh != nil {
- close(c.collectCh)
- }
-
- if c.counterValuesCh != nil {
- close(c.counterValuesCh)
- }
-
- if c.errorCh != nil {
- close(c.errorCh)
- }
-
- c.counterValuesCh = nil
- c.collectCh = nil
- c.errorCh = nil
-}
-
-func formatCounterPath(object, instance, counterName string) string {
- var counterPath string
-
- if instance == InstanceEmpty {
- counterPath = fmt.Sprintf(`\%s\%s`, object, counterName)
- } else {
- counterPath = fmt.Sprintf(`\%s(%s)\%s`, object, instance, counterName)
- }
-
- return counterPath
-}
-
-func isKnownCounterDataError(err error) bool {
- var pdhErr *Error
-
- return errors.As(err, &pdhErr) && (pdhErr.ErrorCode == PdhInvalidData ||
- pdhErr.ErrorCode == PdhCalcNegativeDenominator ||
- pdhErr.ErrorCode == PdhCalcNegativeValue ||
- pdhErr.ErrorCode == PdhCstatusInvalidData ||
- pdhErr.ErrorCode == PdhCstatusNoInstance ||
- pdhErr.ErrorCode == PdhNoData)
-}
diff --git a/internal/perfdata/collector_bench_test.go b/internal/perfdata/collector_bench_test.go
deleted file mode 100644
index b16e807b3..000000000
--- a/internal/perfdata/collector_bench_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package perfdata_test
-
-import (
- "testing"
-
- v2 "github.com/prometheus-community/windows_exporter/internal/perfdata"
- "github.com/stretchr/testify/require"
-)
-
-func BenchmarkTestCollector(b *testing.B) {
- counters := []string{
- "% Processor Time",
- "% Privileged Time",
- "% User Time",
- "Creating Process ID",
- "Elapsed Time",
- "Handle Count",
- "ID Process",
- "IO Data Bytes/sec",
- "IO Data Operations/sec",
- "IO Other Bytes/sec",
- "IO Other Operations/sec",
- "IO Read Bytes/sec",
- "IO Read Operations/sec",
- "IO Write Bytes/sec",
- "IO Write Operations/sec",
- "Page Faults/sec",
- "Page File Bytes Peak",
- "Page File Bytes",
- "Pool Nonpaged Bytes",
- "Pool Paged Bytes",
- "Priority Base",
- "Private Bytes",
- "Thread Count",
- "Virtual Bytes Peak",
- "Virtual Bytes",
- "Working Set - Private",
- "Working Set Peak",
- "Working Set",
- }
- performanceData, err := v2.NewCollector("Process", []string{"*"}, counters)
- require.NoError(b, err)
-
- for i := 0; i < b.N; i++ {
- _, _ = performanceData.Collect()
- }
-
- performanceData.Close()
-
- b.ReportAllocs()
-}
diff --git a/internal/perfdata/collector_test.go b/internal/perfdata/collector_test.go
deleted file mode 100644
index 03a694582..000000000
--- a/internal/perfdata/collector_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package perfdata_test
-
-import (
- "testing"
- "time"
-
- v2 "github.com/prometheus-community/windows_exporter/internal/perfdata"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestCollector(t *testing.T) {
- t.Parallel()
-
- for _, tc := range []struct {
- object string
- instances []string
- counters []string
- }{
- {
- object: "Memory",
- counters: []string{
- "Available Bytes",
- "Available KBytes",
- "Available MBytes",
- "Cache Bytes",
- "Cache Bytes Peak",
- "Cache Faults/sec",
- "Commit Limit",
- "Committed Bytes",
- "Demand Zero Faults/sec",
- "Free & Zero Page List Bytes",
- "Free System Page Table Entries",
- "Modified Page List Bytes",
- "Page Reads/sec",
- },
- }, {
- object: "TCPv4",
- counters: []string{
- "Connection Failures",
- "Connections Active",
- "Connections Established",
- "Connections Passive",
- "Connections Reset",
- "Segments/sec",
- "Segments Received/sec",
- "Segments Retransmitted/sec",
- "Segments Sent/sec",
- },
- }, {
- object: "Process",
- instances: []string{"*"},
- counters: []string{
- "Thread Count",
- "ID Process",
- },
- },
- } {
- t.Run(tc.object, func(t *testing.T) {
- t.Parallel()
-
- performanceData, err := v2.NewCollector(tc.object, tc.instances, tc.counters)
- require.NoError(t, err)
-
- time.Sleep(100 * time.Millisecond)
-
- data, err := performanceData.Collect()
- require.NoError(t, err)
- require.NotEmpty(t, data)
-
- for instance, d := range data {
- require.NotEmpty(t, d)
-
- if instance == "Idle" || instance == "Secure System" {
- continue
- }
-
- for _, c := range tc.counters {
- assert.NotZerof(t, d[c].FirstValue, "object: %s, instance: %s, counter: %s", tc.object, instance, c)
- }
- }
- })
- }
-}
diff --git a/internal/perfdata/pdh.go b/internal/perfdata/pdh.go
deleted file mode 100644
index d07b3a210..000000000
--- a/internal/perfdata/pdh.go
+++ /dev/null
@@ -1,633 +0,0 @@
-// Copyright (c) 2010-2024 The win Authors. All rights reserved.
-// Copyright (c) 2024 The prometheus-community Authors. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// This is the official list of 'win' authors for copyright purposes.
-//
-// Alexander Neumann
-// Joseph Watson
-// Kevin Pors
-
-//go:build windows
-
-package perfdata
-
-import (
- "fmt"
- "time"
- "unsafe"
-
- "github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
- "golang.org/x/sys/windows"
-)
-
-// Error codes.
-const (
- ErrorSuccess = 0
- ErrorFailure = 1
- ErrorInvalidFunction = 1
-)
-
-type (
- HANDLE uintptr
-)
-
-// PDH error codes, which can be returned by all Pdh* functions. Taken from mingw-w64 pdhmsg.h
-
-const (
- PdhCstatusValidData uint32 = 0x00000000 // The returned data is valid.
- PdhCstatusNewData uint32 = 0x00000001 // The return data value is valid and different from the last sample.
- PdhCstatusNoMachine uint32 = 0x800007D0 // Unable to connect to the specified computer, or the computer is offline.
- PdhCstatusNoInstance uint32 = 0x800007D1
- PdhMoreData uint32 = 0x800007D2 // The PdhGetFormattedCounterArray* function can return this if there's 'more data to be displayed'.
- PdhCstatusItemNotValidated uint32 = 0x800007D3
- PdhRetry uint32 = 0x800007D4
- PdhNoData uint32 = 0x800007D5 // The query does not currently contain any counters (for example, limited access)
- PdhCalcNegativeDenominator uint32 = 0x800007D6
- PdhCalcNegativeTimebase uint32 = 0x800007D7
- PdhCalcNegativeValue uint32 = 0x800007D8
- PdhDialogCancelled uint32 = 0x800007D9
- PdhEndOfLogFile uint32 = 0x800007DA
- PdhAsyncQueryTimeout uint32 = 0x800007DB
- PdhCannotSetDefaultRealtimeDatasource uint32 = 0x800007DC
- PdhCstatusNoObject uint32 = 0xC0000BB8
- PdhCstatusNoCounter uint32 = 0xC0000BB9 // The specified counter could not be found.
- PdhCstatusInvalidData uint32 = 0xC0000BBA // The counter was successfully found, but the data returned is not valid.
- PdhMemoryAllocationFailure uint32 = 0xC0000BBB
- PdhInvalidHandle uint32 = 0xC0000BBC
- PdhInvalidArgument uint32 = 0xC0000BBD // Required argument is missing or incorrect.
- PdhFunctionNotFound uint32 = 0xC0000BBE
- PdhCstatusNoCountername uint32 = 0xC0000BBF
- PdhCstatusBadCountername uint32 = 0xC0000BC0 // Unable to parse the counter path. Check the format and syntax of the specified path.
- PdhInvalidBuffer uint32 = 0xC0000BC1
- PdhInsufficientBuffer uint32 = 0xC0000BC2
- PdhCannotConnectMachine uint32 = 0xC0000BC3
- PdhInvalidPath uint32 = 0xC0000BC4
- PdhInvalidInstance uint32 = 0xC0000BC5
- PdhInvalidData uint32 = 0xC0000BC6 // specified counter does not contain valid data or a successful status code.
- PdhNoDialogData uint32 = 0xC0000BC7
- PdhCannotReadNameStrings uint32 = 0xC0000BC8
- PdhLogFileCreateError uint32 = 0xC0000BC9
- PdhLogFileOpenError uint32 = 0xC0000BCA
- PdhLogTypeNotFound uint32 = 0xC0000BCB
- PdhNoMoreData uint32 = 0xC0000BCC
- PdhEntryNotInLogFile uint32 = 0xC0000BCD
- PdhDataSourceIsLogFile uint32 = 0xC0000BCE
- PdhDataSourceIsRealTime uint32 = 0xC0000BCF
- PdhUnableReadLogHeader uint32 = 0xC0000BD0
- PdhFileNotFound uint32 = 0xC0000BD1
- PdhFileAlreadyExists uint32 = 0xC0000BD2
- PdhNotImplemented uint32 = 0xC0000BD3
- PdhStringNotFound uint32 = 0xC0000BD4
- PdhUnableMapNameFiles uint32 = 0x80000BD5
- PdhUnknownLogFormat uint32 = 0xC0000BD6
- PdhUnknownLogsvcCommand uint32 = 0xC0000BD7
- PdhLogsvcQueryNotFound uint32 = 0xC0000BD8
- PdhLogsvcNotOpened uint32 = 0xC0000BD9
- PdhWbemError uint32 = 0xC0000BDA
- PdhAccessDenied uint32 = 0xC0000BDB
- PdhLogFileTooSmall uint32 = 0xC0000BDC
- PdhInvalidDatasource uint32 = 0xC0000BDD
- PdhInvalidSqldb uint32 = 0xC0000BDE
- PdhNoCounters uint32 = 0xC0000BDF
- PdhSQLAllocFailed uint32 = 0xC0000BE0
- PdhSQLAllocconFailed uint32 = 0xC0000BE1
- PdhSQLExecDirectFailed uint32 = 0xC0000BE2
- PdhSQLFetchFailed uint32 = 0xC0000BE3
- PdhSQLRowcountFailed uint32 = 0xC0000BE4
- PdhSQLMoreResultsFailed uint32 = 0xC0000BE5
- PdhSQLConnectFailed uint32 = 0xC0000BE6
- PdhSQLBindFailed uint32 = 0xC0000BE7
- PdhCannotConnectWmiServer uint32 = 0xC0000BE8
- PdhPlaCollectionAlreadyRunning uint32 = 0xC0000BE9
- PdhPlaErrorScheduleOverlap uint32 = 0xC0000BEA
- PdhPlaCollectionNotFound uint32 = 0xC0000BEB
- PdhPlaErrorScheduleElapsed uint32 = 0xC0000BEC
- PdhPlaErrorNostart uint32 = 0xC0000BED
- PdhPlaErrorAlreadyExists uint32 = 0xC0000BEE
- PdhPlaErrorTypeMismatch uint32 = 0xC0000BEF
- PdhPlaErrorFilepath uint32 = 0xC0000BF0
- PdhPlaServiceError uint32 = 0xC0000BF1
- PdhPlaValidationError uint32 = 0xC0000BF2
- PdhPlaValidationWarning uint32 = 0x80000BF3
- PdhPlaErrorNameTooLong uint32 = 0xC0000BF4
- PdhInvalidSQLLogFormat uint32 = 0xC0000BF5
- PdhCounterAlreadyInQuery uint32 = 0xC0000BF6
- PdhBinaryLogCorrupt uint32 = 0xC0000BF7
- PdhLogSampleTooSmall uint32 = 0xC0000BF8
- PdhOsLaterVersion uint32 = 0xC0000BF9
- PdhOsEarlierVersion uint32 = 0xC0000BFA
- PdhIncorrectAppendTime uint32 = 0xC0000BFB
- PdhUnmatchedAppendCounter uint32 = 0xC0000BFC
- PdhSQLAlterDetailFailed uint32 = 0xC0000BFD
- PdhQueryPerfDataTimeout uint32 = 0xC0000BFE
-)
-
-//nolint:gochecknoglobals
-var PDHErrors = map[uint32]string{
- PdhCstatusValidData: "PDH_CSTATUS_VALID_DATA",
- PdhCstatusNewData: "PDH_CSTATUS_NEW_DATA",
- PdhCstatusNoMachine: "PDH_CSTATUS_NO_MACHINE",
- PdhCstatusNoInstance: "PDH_CSTATUS_NO_INSTANCE",
- PdhMoreData: "PDH_MORE_DATA",
- PdhCstatusItemNotValidated: "PDH_CSTATUS_ITEM_NOT_VALIDATED",
- PdhRetry: "PDH_RETRY",
- PdhNoData: "PDH_NO_DATA",
- PdhCalcNegativeDenominator: "PDH_CALC_NEGATIVE_DENOMINATOR",
- PdhCalcNegativeTimebase: "PDH_CALC_NEGATIVE_TIMEBASE",
- PdhCalcNegativeValue: "PDH_CALC_NEGATIVE_VALUE",
- PdhDialogCancelled: "PDH_DIALOG_CANCELLED",
- PdhEndOfLogFile: "PDH_END_OF_LOG_FILE",
- PdhAsyncQueryTimeout: "PDH_ASYNC_QUERY_TIMEOUT",
- PdhCannotSetDefaultRealtimeDatasource: "PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE",
- PdhCstatusNoObject: "PDH_CSTATUS_NO_OBJECT",
- PdhCstatusNoCounter: "PDH_CSTATUS_NO_COUNTER",
- PdhCstatusInvalidData: "PDH_CSTATUS_INVALID_DATA",
- PdhMemoryAllocationFailure: "PDH_MEMORY_ALLOCATION_FAILURE",
- PdhInvalidHandle: "PDH_INVALID_HANDLE",
- PdhInvalidArgument: "PDH_INVALID_ARGUMENT",
- PdhFunctionNotFound: "PDH_FUNCTION_NOT_FOUND",
- PdhCstatusNoCountername: "PDH_CSTATUS_NO_COUNTERNAME",
- PdhCstatusBadCountername: "PDH_CSTATUS_BAD_COUNTERNAME",
- PdhInvalidBuffer: "PDH_INVALID_BUFFER",
- PdhInsufficientBuffer: "PDH_INSUFFICIENT_BUFFER",
- PdhCannotConnectMachine: "PDH_CANNOT_CONNECT_MACHINE",
- PdhInvalidPath: "PDH_INVALID_PATH",
- PdhInvalidInstance: "PDH_INVALID_INSTANCE",
- PdhInvalidData: "PDH_INVALID_DATA",
- PdhNoDialogData: "PDH_NO_DIALOG_DATA",
- PdhCannotReadNameStrings: "PDH_CANNOT_READ_NAME_STRINGS",
- PdhLogFileCreateError: "PDH_LOG_FILE_CREATE_ERROR",
- PdhLogFileOpenError: "PDH_LOG_FILE_OPEN_ERROR",
- PdhLogTypeNotFound: "PDH_LOG_TYPE_NOT_FOUND",
- PdhNoMoreData: "PDH_NO_MORE_DATA",
- PdhEntryNotInLogFile: "PDH_ENTRY_NOT_IN_LOG_FILE",
- PdhDataSourceIsLogFile: "PDH_DATA_SOURCE_IS_LOG_FILE",
- PdhDataSourceIsRealTime: "PDH_DATA_SOURCE_IS_REAL_TIME",
- PdhUnableReadLogHeader: "PDH_UNABLE_READ_LOG_HEADER",
- PdhFileNotFound: "PDH_FILE_NOT_FOUND",
- PdhFileAlreadyExists: "PDH_FILE_ALREADY_EXISTS",
- PdhNotImplemented: "PDH_NOT_IMPLEMENTED",
- PdhStringNotFound: "PDH_STRING_NOT_FOUND",
- PdhUnableMapNameFiles: "PDH_UNABLE_MAP_NAME_FILES",
- PdhUnknownLogFormat: "PDH_UNKNOWN_LOG_FORMAT",
- PdhUnknownLogsvcCommand: "PDH_UNKNOWN_LOGSVC_COMMAND",
- PdhLogsvcQueryNotFound: "PDH_LOGSVC_QUERY_NOT_FOUND",
- PdhLogsvcNotOpened: "PDH_LOGSVC_NOT_OPENED",
- PdhWbemError: "PDH_WBEM_ERROR",
- PdhAccessDenied: "PDH_ACCESS_DENIED",
- PdhLogFileTooSmall: "PDH_LOG_FILE_TOO_SMALL",
- PdhInvalidDatasource: "PDH_INVALID_DATASOURCE",
- PdhInvalidSqldb: "PDH_INVALID_SQLDB",
- PdhNoCounters: "PDH_NO_COUNTERS",
- PdhSQLAllocFailed: "PDH_SQL_ALLOC_FAILED",
- PdhSQLAllocconFailed: "PDH_SQL_ALLOCCON_FAILED",
- PdhSQLExecDirectFailed: "PDH_SQL_EXEC_DIRECT_FAILED",
- PdhSQLFetchFailed: "PDH_SQL_FETCH_FAILED",
- PdhSQLRowcountFailed: "PDH_SQL_ROWCOUNT_FAILED",
- PdhSQLMoreResultsFailed: "PDH_SQL_MORE_RESULTS_FAILED",
- PdhSQLConnectFailed: "PDH_SQL_CONNECT_FAILED",
- PdhSQLBindFailed: "PDH_SQL_BIND_FAILED",
- PdhCannotConnectWmiServer: "PDH_CANNOT_CONNECT_WMI_SERVER",
- PdhPlaCollectionAlreadyRunning: "PDH_PLA_COLLECTION_ALREADY_RUNNING",
- PdhPlaErrorScheduleOverlap: "PDH_PLA_ERROR_SCHEDULE_OVERLAP",
- PdhPlaCollectionNotFound: "PDH_PLA_COLLECTION_NOT_FOUND",
- PdhPlaErrorScheduleElapsed: "PDH_PLA_ERROR_SCHEDULE_ELAPSED",
- PdhPlaErrorNostart: "PDH_PLA_ERROR_NOSTART",
- PdhPlaErrorAlreadyExists: "PDH_PLA_ERROR_ALREADY_EXISTS",
- PdhPlaErrorTypeMismatch: "PDH_PLA_ERROR_TYPE_MISMATCH",
- PdhPlaErrorFilepath: "PDH_PLA_ERROR_FILEPATH",
- PdhPlaServiceError: "PDH_PLA_SERVICE_ERROR",
- PdhPlaValidationError: "PDH_PLA_VALIDATION_ERROR",
- PdhPlaValidationWarning: "PDH_PLA_VALIDATION_WARNING",
- PdhPlaErrorNameTooLong: "PDH_PLA_ERROR_NAME_TOO_LONG",
- PdhInvalidSQLLogFormat: "PDH_INVALID_SQL_LOG_FORMAT",
- PdhCounterAlreadyInQuery: "PDH_COUNTER_ALREADY_IN_QUERY",
- PdhBinaryLogCorrupt: "PDH_BINARY_LOG_CORRUPT",
- PdhLogSampleTooSmall: "PDH_LOG_SAMPLE_TOO_SMALL",
- PdhOsLaterVersion: "PDH_OS_LATER_VERSION",
- PdhOsEarlierVersion: "PDH_OS_EARLIER_VERSION",
- PdhIncorrectAppendTime: "PDH_INCORRECT_APPEND_TIME",
- PdhUnmatchedAppendCounter: "PDH_UNMATCHED_APPEND_COUNTER",
- PdhSQLAlterDetailFailed: "PDH_SQL_ALTER_DETAIL_FAILED",
- PdhQueryPerfDataTimeout: "PDH_QUERY_PERF_DATA_TIMEOUT",
-}
-
-// Formatting options for GetFormattedCounterValue().
-//
-//goland:noinspection GoUnusedConst
-const (
- PdhFmtRaw = 0x00000010
- PdhFmtAnsi = 0x00000020
- PdhFmtUnicode = 0x00000040
- PdhFmtLong = 0x00000100 // Return data as a long int.
- PdhFmtDouble = 0x00000200 // Return data as a double precision floating point real.
- PdhFmtLarge = 0x00000400 // Return data as a 64 bit integer.
- PdhFmtNoscale = 0x00001000 // can be OR-ed: Do not apply the counter's default scaling factor.
- PdhFmt1000 = 0x00002000 // can be OR-ed: multiply the actual value by 1,000.
- PdhFmtNodata = 0x00004000 // can be OR-ed: unknown what this is for, MSDN says nothing.
- PdhFmtNocap100 = 0x00008000 // can be OR-ed: do not cap values > 100.
- PerfDetailCostly = 0x00010000
- PerfDetailStandard = 0x0000FFFF
-)
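
These format flags are meant to be OR-ed together into the dwFormat argument of the PdhGetFormattedCounterValue*/PdhGetFormattedCounterArray* calls. As a minimal sketch (not part of the deleted file), a hypothetical large-integer variant of the double wrapper defined further down would differ only in the flags it combines and the struct it fills:

	// pdhGetFormattedCounterValueLargeExample is hypothetical; it mirrors
	// PdhGetFormattedCounterValueDouble but asks PDH for a 64-bit integer,
	// unscaled and not capped at 100.
	func pdhGetFormattedCounterValueLargeExample(hCounter pdhCounterHandle, lpdwType *uint32, pValue *PdhFmtCountervalueLarge) uint32 {
		ret, _, _ := pdhGetFormattedCounterValue.Call(
			uintptr(hCounter),
			uintptr(PdhFmtLarge|PdhFmtNoscale|PdhFmtNocap100), // flags are OR-ed together
			uintptr(unsafe.Pointer(lpdwType)),
			uintptr(unsafe.Pointer(pValue)))

		return uint32(ret)
	}
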
-
-type (
- pdhQueryHandle HANDLE // query handle
- pdhCounterHandle HANDLE // counter handle
-)
-
-//nolint:gochecknoglobals
-var (
- libPdhDll = windows.NewLazySystemDLL("pdh.dll")
-
- pdhAddCounterW = libPdhDll.NewProc("PdhAddCounterW")
- pdhAddEnglishCounterW = libPdhDll.NewProc("PdhAddEnglishCounterW")
- pdhCloseQuery = libPdhDll.NewProc("PdhCloseQuery")
- pdhCollectQueryData = libPdhDll.NewProc("PdhCollectQueryData")
- pdhCollectQueryDataWithTime = libPdhDll.NewProc("PdhCollectQueryDataWithTime")
- pdhGetFormattedCounterValue = libPdhDll.NewProc("PdhGetFormattedCounterValue")
- pdhGetFormattedCounterArrayW = libPdhDll.NewProc("PdhGetFormattedCounterArrayW")
- pdhOpenQuery = libPdhDll.NewProc("PdhOpenQuery")
- pdhValidatePathW = libPdhDll.NewProc("PdhValidatePathW")
- pdhExpandWildCardPathW = libPdhDll.NewProc("PdhExpandWildCardPathW")
- pdhGetCounterInfoW = libPdhDll.NewProc("PdhGetCounterInfoW")
- pdhGetRawCounterValue = libPdhDll.NewProc("PdhGetRawCounterValue")
- pdhGetRawCounterArrayW = libPdhDll.NewProc("PdhGetRawCounterArrayW")
- pdhPdhGetCounterTimeBase = libPdhDll.NewProc("PdhGetCounterTimeBase")
-)
-
-// PdhAddCounter adds the specified counter to the query. This is the internationalized version. Preferably, use the
-// function PdhAddEnglishCounter instead. hQuery is the query handle, which has been fetched by PdhOpenQuery.
-// szFullCounterPath is a full, internationalized counter path (this will differ per Windows language version).
-// dwUserData is a 'user-defined value', which becomes part of the counter information. To retrieve this value
-// later, call PdhGetCounterInfo() and access dwQueryUserData of the PdhCounterInfo structure.
-//
-// Examples of szFullCounterPath (in an English version of Windows):
-//
-// \\Processor(_Total)\\% Idle Time
-// \\Processor(_Total)\\% Processor Time
-// \\LogicalDisk(C:)\% Free Space
-//
-// To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility,
-// the typeperf command, and the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a
-// full implementation of the pdh.dll API, except with a GUI and all that. The registry setting also provides an
-// interface to the available counters, and can be found at the following key:
-//
-// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage
-//
-// This registry key contains several values as follows:
-//
-// 1
-// 1847
-// 2
-// System
-// 4
-// Memory
-// 6
-// % Processor Time
-// ... many, many more
-//
-// Somehow, these numeric values can be used as szFullCounterPath too:
-//
-// \2\6 will correspond to \\System\% Processor Time
-//
-// The typeperf command may also be pretty easy. To find all performance counters, simply execute:
-//
-// typeperf -qx
-func PdhAddCounter(hQuery pdhQueryHandle, szFullCounterPath string, dwUserData uintptr, phCounter *pdhCounterHandle) uint32 {
- ptxt, _ := windows.UTF16PtrFromString(szFullCounterPath)
- ret, _, _ := pdhAddCounterW.Call(
- uintptr(hQuery),
- uintptr(unsafe.Pointer(ptxt)),
- dwUserData,
- uintptr(unsafe.Pointer(phCounter)))
-
- return uint32(ret)
-}
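
A minimal sketch of the index-based path form described above (not taken from the exporter itself; it assumes the same package, and the numeric indices are illustrative and machine-specific):

	func addCounterByIndexExample() {
		var query pdhQueryHandle
		if PdhOpenQuery(0, 0, &query) != ErrorSuccess {
			return
		}
		defer PdhCloseQuery(query)

		var counter pdhCounterHandle
		// `\2\6` resolves through the Perflib registry key described above, so the
		// path works regardless of the Windows display language.
		if ret := PdhAddCounter(query, `\2\6`, 0, &counter); ret != ErrorSuccess {
			fmt.Println("PdhAddCounter failed:", PDHErrors[ret])
		}
	}
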
-
-// PdhAddEnglishCounter adds the specified language-neutral counter to the query. See the PdhAddCounter function. This function only exists on
-// Windows Vista and later.
-func PdhAddEnglishCounter(hQuery pdhQueryHandle, szFullCounterPath string, dwUserData uintptr, phCounter *pdhCounterHandle) uint32 {
- if pdhAddEnglishCounterW == nil {
- return ErrorInvalidFunction
- }
-
- ptxt, _ := windows.UTF16PtrFromString(szFullCounterPath)
- ret, _, _ := pdhAddEnglishCounterW.Call(
- uintptr(hQuery),
- uintptr(unsafe.Pointer(ptxt)),
- dwUserData,
- uintptr(unsafe.Pointer(phCounter)))
-
- return uint32(ret)
-}
-
-// PdhCloseQuery closes all counters contained in the specified query, closes all handles related to the query,
-// and frees all memory associated with the query.
-func PdhCloseQuery(hQuery pdhQueryHandle) uint32 {
- ret, _, _ := pdhCloseQuery.Call(uintptr(hQuery))
-
- return uint32(ret)
-}
-
-// PdhCollectQueryData collects the current raw data value for all counters in the specified query and updates the status
-// code of each counter. With some counters, this function needs to be repeatedly called before the value
-// of the counter can be extracted with PdhGetFormattedCounterValue(). For example, the following code
-// requires at least two calls:
-//
-// var handle win.PDH_HQUERY
-// var counterHandle win.PDH_HCOUNTER
-// ret := win.PdhOpenQuery(0, 0, &handle)
-// ret = win.PdhAddEnglishCounter(handle, "\\Processor(_Total)\\% Idle Time", 0, &counterHandle)
-// var derp win.PDH_FMT_COUNTERVALUE_DOUBLE
-//
-// ret = win.PdhCollectQueryData(handle)
-// fmt.Printf("Collect return code is %x\n", ret) // return code will be PDH_CSTATUS_INVALID_DATA
-// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp)
-//
-// ret = win.PdhCollectQueryData(handle)
-// fmt.Printf("Collect return code is %x\n", ret) // return code will be ERROR_SUCCESS
-// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp)
-//
-// The PdhCollectQueryData will return an error in the first call because it needs two values for
-// displaying the correct data for the processor idle time. The second call will have a 0 return code.
-func PdhCollectQueryData(hQuery pdhQueryHandle) uint32 {
- ret, _, _ := pdhCollectQueryData.Call(uintptr(hQuery))
-
- return uint32(ret)
-}
-
-// PdhCollectQueryDataWithTime queries data from perfmon, retrieving the device/windows timestamp from the node it was collected on.
-// It converts the returned FILETIME structure to a Go time.Time value and returns it alongside the status code.
-func PdhCollectQueryDataWithTime(hQuery pdhQueryHandle) (uint32, time.Time) {
- var localFileTime windows.Filetime
-
- ret, _, _ := pdhCollectQueryDataWithTime.Call(uintptr(hQuery), uintptr(unsafe.Pointer(&localFileTime)))
-
- if ret == ErrorSuccess {
- var utcFileTime windows.Filetime
-
- if ret := kernel32.LocalFileTimeToFileTime(&localFileTime, &utcFileTime); ret == 0 {
- return uint32(ErrorFailure), time.Now()
- }
-
- retTime := time.Unix(0, utcFileTime.Nanoseconds())
-
- return uint32(ErrorSuccess), retTime
- }
-
- return uint32(ret), time.Now()
-}
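
A short usage sketch (hypothetical, same-package code; query and counter are assumed to have been set up and collected once already, as in the PdhCollectQueryData example above):

	if ret, collectedAt := PdhCollectQueryDataWithTime(query); ret == ErrorSuccess {
		var value PdhFmtCountervalueDouble
		if PdhGetFormattedCounterValueDouble(counter, nil, &value) == ErrorSuccess {
			// collectedAt is the collection timestamp converted from the local FILETIME.
			fmt.Printf("sampled %.2f at %s\n", value.DoubleValue, collectedAt)
		}
	}
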
-
-// PdhGetFormattedCounterValueDouble formats the given hCounter using a 'double'. The result is set into the specialized union struct pValue.
-// This function does not directly translate to a Windows counterpart due to union specialization tricks.
-func PdhGetFormattedCounterValueDouble(hCounter pdhCounterHandle, lpdwType *uint32, pValue *PdhFmtCountervalueDouble) uint32 {
- ret, _, _ := pdhGetFormattedCounterValue.Call(
- uintptr(hCounter),
- uintptr(PdhFmtDouble|PdhFmtNocap100),
- uintptr(unsafe.Pointer(lpdwType)),
- uintptr(unsafe.Pointer(pValue)))
-
- return uint32(ret)
-}
-
-// PdhGetFormattedCounterArrayDouble returns an array of formatted counter values. Use this function when you want to format the counter values of a
-// counter that contains a wildcard character for the instance name. The itemBuffer must be a slice of type PdhFmtCountervalueItemDouble.
-// An example of how this function can be used:
-//
-// okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character
-//
-// // omitted all necessary stuff ...
-//
-// var bufSize uint32
-// var bufCount uint32
-// var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
-// var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
-//
-// for {
-// // collect
-// ret := win.PdhCollectQueryData(queryHandle)
-// if ret == win.ERROR_SUCCESS {
-// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
-// if ret == win.PDH_MORE_DATA {
-// filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
-// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &filledBuf[0])
-// for i := 0; i < int(bufCount); i++ {
-// c := filledBuf[i]
-// var s string = win.UTF16PtrToString(c.SzName)
-// fmt.Printf("Index %d -> %s, value %v\n", i, s, c.FmtValue.DoubleValue)
-// }
-//
-// filledBuf = nil
-// // Need to at least set bufSize to zero, because if not, the function will not
-// // return PDH_MORE_DATA and will not set the bufSize.
-// bufCount = 0
-// bufSize = 0
-// }
-//
-// time.Sleep(2000 * time.Millisecond)
-// }
-// }
-func PdhGetFormattedCounterArrayDouble(hCounter pdhCounterHandle, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *byte) uint32 {
- ret, _, _ := pdhGetFormattedCounterArrayW.Call(
- uintptr(hCounter),
- uintptr(PdhFmtDouble|PdhFmtNocap100),
- uintptr(unsafe.Pointer(lpdwBufferSize)),
- uintptr(unsafe.Pointer(lpdwBufferCount)),
- uintptr(unsafe.Pointer(itemBuffer)))
-
- return uint32(ret)
-}
-
-// PdhOpenQuery creates a new query that is used to manage the collection of performance data.
-// szDataSource is a null terminated string that specifies the name of the log file from which to
-// retrieve the performance data. If 0, performance data is collected from a real-time data source.
-// dwUserData is a user-defined value to associate with this query. To retrieve the user data later,
-// call PdhGetCounterInfo and access dwQueryUserData of the PdhCounterInfo structure. phQuery is
-// the handle to the query, and must be used in subsequent calls. This function returns a PDH_
-// constant error code, or ErrorSuccess if the call succeeded.
-func PdhOpenQuery(szDataSource uintptr, dwUserData uintptr, phQuery *pdhQueryHandle) uint32 {
- ret, _, _ := pdhOpenQuery.Call(
- szDataSource,
- dwUserData,
- uintptr(unsafe.Pointer(phQuery)))
-
- return uint32(ret)
-}
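
Putting the calls together, a typical real-time query lifetime is open, add, collect twice, format, close. The following is an illustrative sketch only (same package, error handling reduced to early returns):

	func sampleIdleTimeOnce() {
		var query pdhQueryHandle
		if PdhOpenQuery(0, 0, &query) != ErrorSuccess {
			return
		}
		// PdhCloseQuery releases the counters and all memory associated with the query.
		defer PdhCloseQuery(query)

		var counter pdhCounterHandle
		if PdhAddEnglishCounter(query, `\Processor(_Total)\% Idle Time`, 0, &counter) != ErrorSuccess {
			return
		}

		// Rate counters need two samples before a formatted value can be computed.
		PdhCollectQueryData(query)
		time.Sleep(time.Second)
		PdhCollectQueryData(query)

		var value PdhFmtCountervalueDouble
		if PdhGetFormattedCounterValueDouble(counter, nil, &value) == ErrorSuccess {
			fmt.Printf("%% Idle Time: %.2f\n", value.DoubleValue)
		}
	}
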
-
-// PdhExpandWildCardPath examines the specified computer or log file and returns those counter paths that match the given counter path
-// which contains wildcard characters. The general counter path format is as follows:
-//
-// \\computer\object(parent/instance#index)\counter
-//
-// The parent, instance, index, and counter components of the counter path may contain either a valid name or a wildcard character.
-// The computer, parent, instance, and index components are not necessary for all counters.
-//
-// The following is a list of the possible formats:
-//
-// \\computer\object(parent/instance#index)\counter
-// \\computer\object(parent/instance)\counter
-// \\computer\object(instance#index)\counter
-// \\computer\object(instance)\counter
-// \\computer\object\counter
-// \object(parent/instance#index)\counter
-// \object(parent/instance)\counter
-// \object(instance#index)\counter
-// \object(instance)\counter
-// \object\counter
-// Use an asterisk (*) as the wildcard character, for example, \object(*)\counter.
-//
-// If a wildcard character is specified in the parent name, all instances of the specified object
-// that match the specified instance and counter fields will be returned.
-// For example, \object(*/instance)\counter.
-//
-// If a wildcard character is specified in the instance name, all instances of the specified object and parent object will be returned if all instance names
-// corresponding to the specified index match the wildcard character. For example, \object(parent/*)\counter.
-// If the object does not contain an instance, an error occurs.
-//
-// If a wildcard character is specified in the counter name, all counters of the specified object are returned.
-//
-// Partial counter path string matches (for example, "pro*") are supported.
-func PdhExpandWildCardPath(szWildCardPath string, mszExpandedPathList *uint16, pcchPathListLength *uint32) uint32 {
- ptxt, _ := windows.UTF16PtrFromString(szWildCardPath)
- flags := uint32(0) // expand instances and counters
- ret, _, _ := pdhExpandWildCardPathW.Call(
- 0, // search counters on local computer
- uintptr(unsafe.Pointer(ptxt)),
- uintptr(unsafe.Pointer(mszExpandedPathList)),
- uintptr(unsafe.Pointer(pcchPathListLength)),
- uintptr(unsafe.Pointer(&flags)))
-
- return uint32(ret)
-}
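
The expanded paths come back as a double-NUL-terminated UTF-16 multi-string, retrieved with the usual two-call buffer pattern: the first call reports PdhMoreData together with the required length, the second fills the buffer. A hedged same-package sketch:

	func expandCounterPaths(wildcard string) []string {
		var length uint32
		if PdhExpandWildCardPath(wildcard, nil, &length) != PdhMoreData {
			return nil
		}

		buf := make([]uint16, length)
		if PdhExpandWildCardPath(wildcard, &buf[0], &length) != ErrorSuccess {
			return nil
		}

		// Split the multi-string on NUL terminators; an empty string marks the end.
		var paths []string
		for start := 0; start < len(buf); {
			end := start
			for end < len(buf) && buf[end] != 0 {
				end++
			}
			if end == start {
				break
			}
			paths = append(paths, windows.UTF16ToString(buf[start:end]))
			start = end + 1
		}

		return paths
	}
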
-
-// PdhValidatePath validates a path. Will return ErrorSuccess when ok, or PdhCstatusBadCountername when the path is erroneous.
-func PdhValidatePath(path string) uint32 {
- ptxt, _ := windows.UTF16PtrFromString(path)
- ret, _, _ := pdhValidatePathW.Call(uintptr(unsafe.Pointer(ptxt)))
-
- return uint32(ret)
-}
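
For example, a path can be validated before it is handed to PdhAddCounter (a sketch; the path is illustrative):

	if ret := PdhValidatePath(`\Memory\Available Bytes`); ret != ErrorSuccess {
		// Typically PdhCstatusBadCountername; PdhFormatError turns it into readable text.
		fmt.Println("invalid counter path:", PdhFormatError(ret))
	}
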
-
-func PdhFormatError(msgID uint32) string {
- var flags uint32 = windows.FORMAT_MESSAGE_FROM_HMODULE | windows.FORMAT_MESSAGE_ARGUMENT_ARRAY | windows.FORMAT_MESSAGE_IGNORE_INSERTS
-
- buf := make([]uint16, 300)
- _, err := windows.FormatMessage(flags, libPdhDll.Handle(), msgID, 0, buf, nil)
-
- if err == nil {
- return windows.UTF16PtrToString(&buf[0])
- }
-
- return fmt.Sprintf("(pdhErr=%d) %s", msgID, err.Error())
-}
-
-// PdhGetCounterInfo retrieves information about a counter, such as data size, counter type, path, and user-supplied data values
-// hCounter [in]
-// Handle of the counter from which you want to retrieve information. The PdhAddCounter function returns this handle.
-//
-// bRetrieveExplainText [in]
-// Determines whether explain text is retrieved. If you set this parameter to TRUE, the explain text for the counter is retrieved.
-// If you set this parameter to FALSE, the field in the returned buffer is NULL.
-//
-// pdwBufferSize [in, out]
-// Size of the lpBuffer buffer, in bytes. If zero on input, the function returns PdhMoreData and sets this parameter to the required buffer size.
-// If the buffer is larger than the required size, the function sets this parameter to the actual size of the buffer that was used.
-// If the specified size on input is greater than zero but less than the required size, you should not rely on the returned size to reallocate the buffer.
-//
-// lpBuffer [out]
-// Caller-allocated buffer that receives a PdhCounterInfo structure.
-// The structure is variable-length, because the string data is appended to the end of the fixed-format portion of the structure.
-// This is done so that all data is returned in a single buffer allocated by the caller. Set to NULL if pdwBufferSize is zero.
-func PdhGetCounterInfo(hCounter pdhCounterHandle, bRetrieveExplainText int, pdwBufferSize *uint32, lpBuffer *byte) uint32 {
- ret, _, _ := pdhGetCounterInfoW.Call(
- uintptr(hCounter),
- uintptr(bRetrieveExplainText),
- uintptr(unsafe.Pointer(pdwBufferSize)),
- uintptr(unsafe.Pointer(lpBuffer)))
-
- return uint32(ret)
-}
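
Because the structure is variable-length, callers use the same size-then-fill pattern and overlay PdhCounterInfo onto the start of their own byte buffer. A sketch under those assumptions (same package; only the full path is read back):

	func counterFullPath(counter pdhCounterHandle) string {
		var size uint32
		// First call with a zero size: PDH reports PdhMoreData and the required size.
		if PdhGetCounterInfo(counter, 0, &size, nil) != PdhMoreData {
			return ""
		}

		buf := make([]byte, size)
		if PdhGetCounterInfo(counter, 0, &size, &buf[0]) != ErrorSuccess {
			return ""
		}

		// The fixed-format part of the buffer is a PdhCounterInfo; the strings it
		// points to are appended to the same buffer.
		info := (*PdhCounterInfo)(unsafe.Pointer(&buf[0]))

		return windows.UTF16PtrToString(info.SzFullPath)
	}
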
-
-// PdhGetRawCounterValue returns the current raw value of the counter.
-// If the specified counter instance does not exist, this function will return ErrorSuccess
-// and the CStatus member of the PdhRawCounter structure will contain PdhCstatusNoInstance.
-//
-// hCounter [in]
-// Handle of the counter from which to retrieve the current raw value. The PdhAddCounter function returns this handle.
-//
-// lpdwType [out]
-// Receives the counter type. For a list of counter types, see the Counter Types section of the Windows Server 2003 Deployment Kit.
-// This parameter is optional.
-//
-// pValue [out]
-// A PdhRawCounter structure that receives the counter value.
-func PdhGetRawCounterValue(hCounter pdhCounterHandle, lpdwType *uint32, pValue *PdhRawCounter) uint32 {
- ret, _, _ := pdhGetRawCounterValue.Call(
- uintptr(hCounter),
- uintptr(unsafe.Pointer(lpdwType)),
- uintptr(unsafe.Pointer(pValue)))
-
- return uint32(ret)
-}
-
-// PdhGetRawCounterArray returns an array of raw values from the specified counter. Use this function when you want to retrieve the raw counter values
-// of a counter that contains a wildcard character for the instance name.
-// hCounter
-// Handle of the counter whose current raw instance values you want to retrieve. The PdhAddCounter function returns this handle.
-//
-// lpdwBufferSize
-// Size of the ItemBuffer buffer, in bytes. If zero on input, the function returns PdhMoreData and sets this parameter to the required buffer size.
-// If the buffer is larger than the required size, the function sets this parameter to the actual size of the buffer that was used.
-// If the specified size on input is greater than zero but less than the required size, you should not rely on the returned size to reallocate the buffer.
-//
-// lpdwItemCount
-// Number of raw counter values in the ItemBuffer buffer.
-//
-// ItemBuffer
-// Caller-allocated buffer that receives the array of PdhRawCounterItem structures; the structures contain the raw instance counter values.
-// Set to NULL if lpdwBufferSize is zero.
-func PdhGetRawCounterArray(hCounter pdhCounterHandle, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *byte) uint32 {
- ret, _, _ := pdhGetRawCounterArrayW.Call(
- uintptr(hCounter),
- uintptr(unsafe.Pointer(lpdwBufferSize)),
- uintptr(unsafe.Pointer(lpdwBufferCount)),
- uintptr(unsafe.Pointer(itemBuffer)))
-
- return uint32(ret)
-}
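
The same two-call buffer pattern applies here. A sketch that reads the raw per-instance values back out of the caller buffer (same package, unsafe overlay as above):

	func rawInstanceValues(counter pdhCounterHandle) []PdhRawCounterItem {
		var size, count uint32
		if PdhGetRawCounterArray(counter, &size, &count, nil) != PdhMoreData {
			return nil
		}

		buf := make([]byte, size)
		if PdhGetRawCounterArray(counter, &size, &count, &buf[0]) != ErrorSuccess {
			return nil
		}

		// The buffer starts with count PdhRawCounterItem structures; the instance
		// name strings follow them in the same allocation.
		return unsafe.Slice((*PdhRawCounterItem)(unsafe.Pointer(&buf[0])), count)
	}
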
-
-// PdhGetCounterTimeBase returns the time base of the specified counter.
-// hCounter
-// Handle of the counter whose time base you want to retrieve. The PdhAddCounter function returns this handle.
-//
-// pTimeBase
-// Time base that specifies the number of performance values a counter samples per second.
-func PdhGetCounterTimeBase(hCounter pdhCounterHandle, pTimeBase *int64) uint32 {
- ret, _, _ := pdhPdhGetCounterTimeBase.Call(
- uintptr(hCounter),
- uintptr(unsafe.Pointer(pTimeBase)))
-
- return uint32(ret)
-}
diff --git a/internal/perfdata/pdh_arm64.go b/internal/perfdata/pdh_arm64.go
deleted file mode 100644
index 0047e6acd..000000000
--- a/internal/perfdata/pdh_arm64.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (c) 2010-2024 The win Authors. All rights reserved.
-// Copyright (c) 2024 The prometheus-community Authors. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// This is the official list of 'win' authors for copyright purposes.
-//
-// Alexander Neumann
-// Joseph Watson
-// Kevin Pors
-
-//go:build windows
-
-package perfdata
-
-import "golang.org/x/sys/windows"
-
-// PdhFmtCountervalueDouble is a union specialization for double values.
-type PdhFmtCountervalueDouble struct {
- CStatus uint32
- DoubleValue float64
-}
-
-// PdhFmtCountervalueLarge is a union specialization for 64-bit integer values.
-type PdhFmtCountervalueLarge struct {
- CStatus uint32
- LargeValue int64
-}
-
-// PdhFmtCountervalueLong is a union specialization for long values.
-type PdhFmtCountervalueLong struct {
- CStatus uint32
- LongValue int32
- padding [4]byte //nolint:unused // Memory reservation
-}
-
-type PdhFmtCountervalueItemDouble struct {
- SzName *uint16
- FmtValue PdhFmtCountervalueDouble
-}
-
-// PdhFmtCountervalueItemLarge is a union specialization for 'large' values, used by PdhGetFormattedCounterArrayLarge().
-type PdhFmtCountervalueItemLarge struct {
- SzName *uint16 // pointer to a string
- FmtValue PdhFmtCountervalueLarge
-}
-
-// PdhFmtCountervalueItemLong is a union specialization for long values, used by PdhGetFormattedCounterArrayLong().
-type PdhFmtCountervalueItemLong struct {
- SzName *uint16 // pointer to a string
- FmtValue PdhFmtCountervalueLong
-}
-
-// PdhCounterInfo structure contains information describing the properties of a counter. This information also includes the counter path.
-type PdhCounterInfo struct {
- // Size of the structure, including the appended strings, in bytes.
- DwLength uint32
- // Counter type. For a list of counter types, see the Counter Types section
- // of the Windows Server 2003 Deployment Kit (http://go.microsoft.com/fwlink/p/?linkid=84422).
- // The counter type constants are defined in Winperf.h.
- DwType uint32
- // Counter version information. Not used.
- CVersion uint32
- // Counter status that indicates if the counter value is valid. For a list of possible values,
- // see Checking PDH Interface Return Values.
- CStatus uint32
- // Scale factor to use when computing the displayable value of the counter. The scale factor is a power of ten.
- // The valid range of this parameter is PDH_MIN_SCALE (-7) (the returned value is the actual value times 10⁻⁷) to
- // PDH_MAX_SCALE (+7) (the returned value is the actual value times 10⁷). A value of zero will set the scale to one, so that the actual value is returned.
- LScale int32
- // Default scale factor as suggested by the counter's provider.
- LDefaultScale int32
- // The value passed in the dwUserData parameter when calling PdhAddCounter.
- DwUserData *uint32
- // The value passed in the dwUserData parameter when calling PdhOpenQuery.
- DwQueryUserData *uint32
- // Null-terminated string that specifies the full counter path. The string follows this structure in memory.
- SzFullPath *uint16 // pointer to a string
- // Null-terminated string that contains the name of the computer specified in the counter path. Is NULL, if the path does not specify a computer.
- // The string follows this structure in memory.
- SzMachineName *uint16 // pointer to a string
- // Null-terminated string that contains the name of the performance object specified in the counter path. The string follows this structure in memory.
- SzObjectName *uint16 // pointer to a string
- // Null-terminated string that contains the name of the object instance specified in the counter path. Is NULL, if the path does not specify an instance.
- // The string follows this structure in memory.
- SzInstanceName *uint16 // pointer to a string
- // Null-terminated string that contains the name of the parent instance specified in the counter path.
- // Is NULL, if the path does not specify a parent instance.
- // The string follows this structure in memory.
- SzParentInstance *uint16 // pointer to a string
- // Instance index specified in the counter path. Is 0, if the path does not specify an instance index.
- DwInstanceIndex uint32
- // Null-terminated string that contains the counter name. The string follows this structure in memory.
- SzCounterName *uint16 // pointer to a string
- // Help text that describes the counter. Is NULL if the source is a log file.
- SzExplainText *uint16 // pointer to a string
- // Start of the string data that is appended to the structure.
- DataBuffer [1]uint32 // pointer to an extra space
-}
-
-// The PdhRawCounter structure returns the data as it was collected from the counter provider.
-// No translation, formatting, or other interpretation is performed on the data.
-type PdhRawCounter struct {
- // Counter status that indicates if the counter value is valid. Check this member before using the data in a calculation or displaying its value.
- // For a list of possible values, see https://docs.microsoft.com/windows/desktop/PerfCtrs/checking-pdh-interface-return-values
- CStatus uint32
- // Local time for when the data was collected
- TimeStamp windows.Filetime
- // First raw counter value.
- FirstValue int64
- // Second raw counter value. Rate counters require two values in order to compute a displayable value.
- SecondValue int64
- // If the counter type contains the PERF_MULTI_COUNTER flag, this member contains the additional counter data used in the calculation.
- // For example, the PERF_100NSEC_MULTI_TIMER counter type contains the PERF_MULTI_COUNTER flag.
- MultiCount uint32
-}
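
To illustrate why rate counters need two snapshots, a rough counter-per-second estimate can be derived from two raw samples. This is illustrative only: every counter type has its own formula, and the formatting functions above normally do this work (same package, assuming the time package is imported):

	// approxRatePerSecond is a simplified, hypothetical calculation using only
	// FirstValue and the collection timestamps of two raw samples.
	func approxRatePerSecond(previous, current PdhRawCounter) float64 {
		elapsed := time.Unix(0, current.TimeStamp.Nanoseconds()).Sub(time.Unix(0, previous.TimeStamp.Nanoseconds()))
		if elapsed <= 0 {
			return 0
		}

		return float64(current.FirstValue-previous.FirstValue) / elapsed.Seconds()
	}
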
-
-type PdhRawCounterItem struct {
- // Pointer to a null-terminated string that specifies the instance name of the counter. The string is appended to the end of this structure.
- SzName *uint16
- // A PdhRawCounter structure that contains the raw counter value of the instance
- RawValue PdhRawCounter
-}
diff --git a/internal/perfdata/types.go b/internal/perfdata/types.go
deleted file mode 100644
index c62456b59..000000000
--- a/internal/perfdata/types.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package perfdata
-
-import "github.com/prometheus/client_golang/prometheus"
-
-const (
- InstanceEmpty = "------"
- InstanceTotal = "_Total"
-)
-
-type CounterValue struct {
- Type prometheus.ValueType
- FirstValue float64
- SecondValue float64
-}
diff --git a/internal/utils/testutils/testutils.go b/internal/utils/testutils/testutils.go
index 90e9e97d3..d35f30a01 100644
--- a/internal/utils/testutils/testutils.go
+++ b/internal/utils/testutils/testutils.go
@@ -27,19 +27,25 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/collector/update"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/pkg/collector"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
"golang.org/x/sys/windows"
)
-func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, collectFunc collector.BuilderWithFlags[C]) {
+func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, collectFunc collector.BuilderWithFlags[C], fn ...func(app *kingpin.Application)) {
b.Helper()
logger := slog.New(slog.NewTextHandler(io.Discard, nil))
- c := collectFunc(kingpin.CommandLine)
+ app := kingpin.New("windows_exporter", "Windows metrics exporter.")
+ c := collectFunc(app)
+
+ for _, f := range fn {
+ f(app)
+ }
+
collectors := collector.New(map[string]collector.Collector{name: c})
require.NoError(b, collectors.Build(logger))
@@ -96,8 +102,8 @@ func TestCollector[C collector.Collector, V interface{}](t *testing.T, fn func(*
switch {
case err == nil:
case errors.Is(err, mi.MI_RESULT_INVALID_NAMESPACE),
- errors.Is(err, perfdata.NewPdhError(perfdata.PdhCstatusNoCounter)),
- errors.Is(err, perfdata.NewPdhError(perfdata.PdhCstatusNoObject)),
+ errors.Is(err, pdh.NewPdhError(pdh.CstatusNoCounter)),
+ errors.Is(err, pdh.NewPdhError(pdh.CstatusNoObject)),
errors.Is(err, update.ErrUpdateServiceDisabled),
errors.Is(err, os.ErrNotExist):
default:
@@ -111,8 +117,8 @@ func TestCollector[C collector.Collector, V interface{}](t *testing.T, fn func(*
switch {
// container collector
case errors.Is(err, windows.Errno(2151088411)),
- errors.Is(err, perfdata.ErrPerformanceCounterNotInitialized),
- errors.Is(err, perfdata.ErrNoData),
+ errors.Is(err, pdh.ErrPerformanceCounterNotInitialized),
+ errors.Is(err, pdh.ErrNoData),
errors.Is(err, mi.MI_RESULT_INVALID_NAMESPACE),
errors.Is(err, mi.MI_RESULT_INVALID_QUERY),
errors.Is(err, update.ErrNoUpdates):
diff --git a/pkg/collector/collect.go b/pkg/collector/collect.go
index 233bd27a7..f180caaf6 100644
--- a/pkg/collector/collect.go
+++ b/pkg/collector/collect.go
@@ -24,7 +24,7 @@ import (
"time"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -192,7 +192,7 @@ func (c *Collection) collectCollector(ch chan<- prometheus.Metric, logger *slog.
name,
)
- logger.Warn(fmt.Sprintf("collector %s timeouted after %s, resulting in %d metrics", name, maxScrapeDuration, numMetrics))
+ logger.LogAttrs(ctx, slog.LevelWarn, fmt.Sprintf("collector %s timed out after %s, resulting in %d metrics", name, maxScrapeDuration, numMetrics))
go func() {
// Drain channel in case of premature return to not leak a goroutine.
@@ -204,21 +204,20 @@ func (c *Collection) collectCollector(ch chan<- prometheus.Metric, logger *slog.
return pending
}
- if err != nil && !errors.Is(err, perfdata.ErrNoData) && !errors.Is(err, types.ErrNoData) {
- loggerFn := logger.Warn
-
- if errors.Is(err, perfdata.ErrPerformanceCounterNotInitialized) || errors.Is(err, mi.MI_RESULT_INVALID_NAMESPACE) {
+ if err != nil && !errors.Is(err, pdh.ErrNoData) && !errors.Is(err, types.ErrNoData) {
+ if errors.Is(err, pdh.ErrPerformanceCounterNotInitialized) || errors.Is(err, mi.MI_RESULT_INVALID_NAMESPACE) {
err = fmt.Errorf("%w. Check application logs from initialization phase for more information", err)
}
- loggerFn(fmt.Sprintf("collector %s failed after %s, resulting in %d metrics", name, duration, numMetrics),
+ logger.LogAttrs(ctx, slog.LevelWarn,
+ fmt.Sprintf("collector %s failed after %s, resulting in %d metrics", name, duration, numMetrics),
slog.Any("err", err),
)
return failed
}
- logger.Debug(fmt.Sprintf("collector %s succeeded after %s, resulting in %d metrics", name, duration, numMetrics))
+ logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("collector %s succeeded after %s, resulting in %d metrics", name, duration, numMetrics))
return success
}
diff --git a/pkg/collector/collection.go b/pkg/collector/collection.go
index c79f36386..3bc331cd2 100644
--- a/pkg/collector/collection.go
+++ b/pkg/collector/collection.go
@@ -16,6 +16,7 @@
package collector
import (
+ "context"
"errors"
"fmt"
"log/slog"
@@ -74,7 +75,7 @@ import (
"github.com/prometheus-community/windows_exporter/internal/collector/update"
"github.com/prometheus-community/windows_exporter/internal/collector/vmware"
"github.com/prometheus-community/windows_exporter/internal/mi"
- "github.com/prometheus-community/windows_exporter/internal/perfdata"
+ "github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -229,13 +230,11 @@ func (c *Collection) Build(logger *slog.Logger) error {
errs := make([]error, 0, len(c.collectors))
for err := range errCh {
- if errors.Is(err, perfdata.ErrNoData) ||
- errors.Is(err, perfdata.NewPdhError(perfdata.PdhCstatusNoObject)) ||
- errors.Is(err, perfdata.NewPdhError(perfdata.PdhCstatusNoCounter)) ||
+ if errors.Is(err, pdh.ErrNoData) ||
+ errors.Is(err, pdh.NewPdhError(pdh.CstatusNoObject)) ||
+ errors.Is(err, pdh.NewPdhError(pdh.CstatusNoCounter)) ||
errors.Is(err, mi.MI_RESULT_INVALID_NAMESPACE) {
- logger.Warn("couldn't initialize collector",
- slog.Any("err", err),
- )
+ logger.LogAttrs(context.Background(), slog.LevelWarn, "couldn't initialize collector", slog.Any("err", err))
continue
}