diff --git a/.golangci.yml b/.golangci.yml index 0dd35b446..227500dba 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -31,10 +31,8 @@ linters: - typecheck - unconvert - unused + - sloglint linters-settings: - errcheck: - exclude-functions: - - (github.com/go-kit/log.Logger).Log goimports: local-prefixes: "github.com/prometheus-community/yet-another-cloudwatch-exporter" diff --git a/cmd/yace/main.go b/cmd/yace/main.go index 731b96966..da384e90a 100644 --- a/cmd/yace/main.go +++ b/cmd/yace/main.go @@ -15,11 +15,15 @@ package main import ( "context" "fmt" + "log/slog" "net/http" "net/http/pprof" "os" + "slices" "strings" + "github.com/prometheus/common/promslog" + promslogflag "github.com/prometheus/common/promslog/flag" "github.com/urfave/cli/v2" "golang.org/x/sync/semaphore" @@ -28,7 +32,6 @@ import ( v1 "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/v1" v2 "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/v2" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" ) const ( @@ -56,7 +59,7 @@ const ( var ( addr string configFile string - debug bool + logLevel string logFormat string fips bool cloudwatchConcurrency cloudwatch.ConcurrencyConfig @@ -66,7 +69,7 @@ var ( labelsSnakeCase bool profilingEnabled bool - logger logging.Logger + logger *slog.Logger ) func main() { @@ -74,9 +77,11 @@ func main() { if err := app.Run(os.Args); err != nil { // if we exit very early we'll not have set up the logger yet if logger == nil { - logger = logging.NewLogger(defaultLogFormat, debug, "version", version) + jsonFmt := &promslog.AllowedFormat{} + _ = jsonFmt.Set("json") + logger = promslog.New(&promslog.Config{Format: jsonFmt}) } - logger.Error(err, "Error running yace") + logger.Error("Error running yace", "err", err) os.Exit(1) } } @@ -107,23 +112,25 @@ func NewYACEApp() *cli.App { Destination: &configFile, EnvVars: 
[]string{"config.file"}, }, - &cli.BoolFlag{ - Name: "debug", - Value: false, - Usage: "Verbose logging", - Destination: &debug, - EnvVars: []string{"debug"}, + &cli.StringFlag{ + Name: "log.level", + Value: "", + Usage: promslogflag.LevelFlagHelp, + Destination: &logLevel, + Action: func(_ *cli.Context, s string) error { + if !slices.Contains(promslog.LevelFlagOptions, s) { + return fmt.Errorf("unrecognized log level %q", s) + } + return nil + }, }, &cli.StringFlag{ Name: "log.format", Value: defaultLogFormat, - Usage: "Output format of log messages. One of: [logfmt, json]. Default: [json].", + Usage: promslogflag.FormatFlagHelp, Destination: &logFormat, Action: func(_ *cli.Context, s string) error { - switch s { - case "logfmt", "json": - break - default: + if !slices.Contains(promslog.FormatFlagOptions, s) { return fmt.Errorf("unrecognized log format %q", s) } return nil @@ -212,11 +219,11 @@ func NewYACEApp() *cli.App { &cli.StringFlag{Name: "config.file", Value: "config.yml", Usage: "Path to configuration file.", Destination: &configFile}, }, Action: func(_ *cli.Context) error { - logger = logging.NewLogger(logFormat, debug, "version", version) + logger = newLogger(logFormat, logLevel).With("version", version) logger.Info("Parsing config") cfg := config.ScrapeConf{} if _, err := cfg.Load(configFile, logger); err != nil { - logger.Error(err, "Couldn't read config file", "path", configFile) + logger.Error("Couldn't read config file", "err", err, "path", configFile) os.Exit(1) } logger.Info("Config file is valid", "path", configFile) @@ -242,7 +249,7 @@ func NewYACEApp() *cli.App { } func startScraper(c *cli.Context) error { - logger = logging.NewLogger(logFormat, debug, "version", version) + logger = newLogger(logFormat, logLevel).With("version", version) // log warning if the two concurrency limiting methods are configured via CLI if c.IsSet("cloudwatch-concurrency") && c.IsSet("cloudwatch-concurrency.per-api-limit-enabled") { @@ -310,7 +317,7 @@ func 
startScraper(c *cli.Context) error { newCfg := config.ScrapeConf{} newJobsCfg, err := newCfg.Load(configFile, logger) if err != nil { - logger.Error(err, "Couldn't read config file", "path", configFile) + logger.Error("Couldn't read config file", "err", err, "path", configFile) return } @@ -323,7 +330,7 @@ func startScraper(c *cli.Context) error { // Can't override cache while also creating err cache, err = v2.NewFactory(logger, newJobsCfg, fips) if err != nil { - logger.Error(err, "Failed to construct aws sdk v2 client cache", "path", configFile) + logger.Error("Failed to construct aws sdk v2 client cache", "err", err, "path", configFile) return } } @@ -339,3 +346,16 @@ func startScraper(c *cli.Context) error { srv := &http.Server{Addr: addr, Handler: mux} return srv.ListenAndServe() } + +func newLogger(format, level string) *slog.Logger { + // If flag parsing was successful, then we know that format and level + // are both valid options; no need to error check their returns, just + // set their values. 
+ fmt := &promslog.AllowedFormat{} + _ = fmt.Set(format) + + lvl := &promslog.AllowedLevel{} + _ = lvl.Set(level) + + return promslog.New(&promslog.Config{Format: fmt, Level: lvl}) +} diff --git a/cmd/yace/scraper.go b/cmd/yace/scraper.go index 6082e55be..b5282d1f3 100644 --- a/cmd/yace/scraper.go +++ b/cmd/yace/scraper.go @@ -14,6 +14,7 @@ package main import ( "context" + "log/slog" "net/http" "sync/atomic" "time" @@ -23,7 +24,6 @@ import ( exporter "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -56,7 +56,7 @@ func (s *scraper) makeHandler() func(http.ResponseWriter, *http.Request) { } } -func (s *scraper) decoupled(ctx context.Context, logger logging.Logger, jobsCfg model.JobsConfig, cache cachingFactory) { +func (s *scraper) decoupled(ctx context.Context, logger *slog.Logger, jobsCfg model.JobsConfig, cache cachingFactory) { logger.Debug("Starting scraping async") s.scrape(ctx, logger, jobsCfg, cache) @@ -75,7 +75,7 @@ func (s *scraper) decoupled(ctx context.Context, logger logging.Logger, jobsCfg } } -func (s *scraper) scrape(ctx context.Context, logger logging.Logger, jobsCfg model.JobsConfig, cache cachingFactory) { +func (s *scraper) scrape(ctx context.Context, logger *slog.Logger, jobsCfg model.JobsConfig, cache cachingFactory) { if !sem.TryAcquire(1) { // This shouldn't happen under normal use, users should adjust their configuration when this occurs. // Let them know by logging a warning. 
@@ -120,7 +120,7 @@ func (s *scraper) scrape(ctx context.Context, logger logging.Logger, jobsCfg mod options..., ) if err != nil { - logger.Error(err, "error updating metrics") + logger.Error("error updating metrics", "err", err) } s.registry.Store(newRegistry) diff --git a/go.mod b/go.mod index 8618b3058..c9c177534 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/storagegateway v1.34.6 github.com/aws/aws-sdk-go-v2/service/sts v1.33.1 github.com/aws/smithy-go v1.22.1 - github.com/go-kit/log v0.2.1 github.com/grafana/regexp v0.0.0-20240607082908-2cb410fa05da github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 @@ -34,6 +33,8 @@ require ( ) require ( + github.com/alecthomas/kingpin/v2 v2.4.0 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.20 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect @@ -46,7 +47,6 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/kr/text v0.2.0 // indirect @@ -56,6 +56,7 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect golang.org/x/sys v0.25.0 // indirect google.golang.org/protobuf v1.34.2 // indirect diff --git a/go.sum b/go.sum index e320eac71..679d45aaa 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,7 @@ +github.com/alecthomas/kingpin/v2 v2.4.0 
h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.32.5 h1:U8vdWJuY7ruAkzaOdD7guwJjD06YSKmnKCJs7s3IkIo= @@ -58,10 +62,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grafana/regexp v0.0.0-20240607082908-2cb410fa05da h1:BML5sNe+bw2uO8t8cQSwe5QhvoP04eHPF7bnaQma0Kw= @@ -97,6 +97,7 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -106,6 +107,8 @@ github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9 github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA= @@ -119,6 +122,7 @@ google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWn gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git 
a/pkg/clients/account/v1/client.go b/pkg/clients/account/v1/client.go index 79d28c430..97c629ee7 100644 --- a/pkg/clients/account/v1/client.go +++ b/pkg/clients/account/v1/client.go @@ -15,6 +15,7 @@ package v1 import ( "context" "errors" + "log/slog" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account" @@ -22,17 +23,15 @@ import ( "github.com/aws/aws-sdk-go/service/iam/iamiface" "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" - - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" ) type client struct { - logger logging.Logger + logger *slog.Logger stsClient stsiface.STSAPI iamClient iamiface.IAMAPI } -func NewClient(logger logging.Logger, stsClient stsiface.STSAPI, iamClient iamiface.IAMAPI) account.Client { +func NewClient(logger *slog.Logger, stsClient stsiface.STSAPI, iamClient iamiface.IAMAPI) account.Client { return &client{ logger: logger, stsClient: stsClient, diff --git a/pkg/clients/account/v2/client.go b/pkg/clients/account/v2/client.go index 0cdb63c38..253204489 100644 --- a/pkg/clients/account/v2/client.go +++ b/pkg/clients/account/v2/client.go @@ -15,21 +15,21 @@ package v2 import ( "context" "errors" + "log/slog" "github.com/aws/aws-sdk-go-v2/service/iam" "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" ) type client struct { - logger logging.Logger + logger *slog.Logger stsClient *sts.Client iamClient *iam.Client } -func NewClient(logger logging.Logger, stsClient *sts.Client, iamClient *iam.Client) account.Client { +func NewClient(logger *slog.Logger, stsClient *sts.Client, iamClient *iam.Client) account.Client { return &client{ logger: logger, stsClient: stsClient, diff --git a/pkg/clients/cloudwatch/client.go b/pkg/clients/cloudwatch/client.go index e36599065..44b06113b 100644 --- 
a/pkg/clients/cloudwatch/client.go +++ b/pkg/clients/cloudwatch/client.go @@ -14,9 +14,9 @@ package cloudwatch import ( "context" + "log/slog" "time" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -37,7 +37,7 @@ type Client interface { GetMetricData(ctx context.Context, getMetricData []*model.CloudwatchData, namespace string, startTime time.Time, endTime time.Time) []MetricDataResult // GetMetricStatistics returns the output of the GetMetricStatistics CloudWatch API. - GetMetricStatistics(ctx context.Context, logger logging.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.Datapoint + GetMetricStatistics(ctx context.Context, logger *slog.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.Datapoint } // ConcurrencyLimiter limits the concurrency when calling AWS CloudWatch APIs. The functions implemented @@ -73,7 +73,7 @@ func NewLimitedConcurrencyClient(client Client, limiter ConcurrencyLimiter) Clie } } -func (c limitedConcurrencyClient) GetMetricStatistics(ctx context.Context, logger logging.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.Datapoint { +func (c limitedConcurrencyClient) GetMetricStatistics(ctx context.Context, logger *slog.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.Datapoint { c.limiter.Acquire(getMetricStatisticsCall) res := c.client.GetMetricStatistics(ctx, logger, dimensions, namespace, metric) c.limiter.Release(getMetricStatisticsCall) diff --git a/pkg/clients/cloudwatch/v1/client.go b/pkg/clients/cloudwatch/v1/client.go index 30947e2ed..64bf2924b 100644 --- a/pkg/clients/cloudwatch/v1/client.go +++ b/pkg/clients/cloudwatch/v1/client.go @@ -14,6 +14,7 @@ package v1 import ( "context" + "log/slog" "time" "github.com/aws/aws-sdk-go/aws" @@ -21,17 +22,16 @@ 
import ( "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" cloudwatch_client "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil" ) type client struct { - logger logging.Logger + logger *slog.Logger cloudwatchAPI cloudwatchiface.CloudWatchAPI } -func NewClient(logger logging.Logger, cloudwatchAPI cloudwatchiface.CloudWatchAPI) cloudwatch_client.Client { +func NewClient(logger *slog.Logger, cloudwatchAPI cloudwatchiface.CloudWatchAPI) cloudwatch_client.Client { return &client{ logger: logger, cloudwatchAPI: cloudwatchAPI, @@ -47,25 +47,21 @@ func (c client) ListMetrics(ctx context.Context, namespace string, metric *model filter.RecentlyActive = aws.String("PT3H") } - if c.logger.IsDebugEnabled() { - c.logger.Debug("ListMetrics", "input", filter) - } + c.logger.Debug("ListMetrics", "input", filter) err := c.cloudwatchAPI.ListMetricsPagesWithContext(ctx, filter, func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool { promutil.CloudwatchAPICounter.WithLabelValues("ListMetrics").Inc() metricsPage := toModelMetric(page) - if c.logger.IsDebugEnabled() { - c.logger.Debug("ListMetrics", "output", metricsPage, "last_page", lastPage) - } + c.logger.Debug("ListMetrics", "output", metricsPage, "last_page", lastPage) fn(metricsPage) return !lastPage }) if err != nil { promutil.CloudwatchAPIErrorCounter.WithLabelValues("ListMetrics").Inc() - c.logger.Error(err, "ListMetrics error") + c.logger.Error("ListMetrics error", "err", err) return err } @@ -122,9 +118,7 @@ func (c client) GetMetricData(ctx context.Context, getMetricData []*model.Cloudw ScanBy: aws.String("TimestampDescending"), } promutil.CloudwatchGetMetricDataAPIMetricsCounter.Add(float64(len(input.MetricDataQueries))) - if 
c.logger.IsDebugEnabled() { - c.logger.Debug("GetMetricData", "input", input) - } + c.logger.Debug("GetMetricData", "input", input) var resp cloudwatch.GetMetricDataOutput // Using the paged version of the function @@ -136,13 +130,11 @@ func (c client) GetMetricData(ctx context.Context, getMetricData []*model.Cloudw return !lastPage }) - if c.logger.IsDebugEnabled() { - c.logger.Debug("GetMetricData", "output", resp) - } + c.logger.Debug("GetMetricData", "output", resp) if err != nil { promutil.CloudwatchAPIErrorCounter.WithLabelValues("GetMetricData").Inc() - c.logger.Error(err, "GetMetricData error") + c.logger.Error("GetMetricData error", "err", err) return nil } return toMetricDataResult(resp) @@ -161,25 +153,21 @@ func toMetricDataResult(resp cloudwatch.GetMetricDataOutput) []cloudwatch_client return output } -func (c client) GetMetricStatistics(ctx context.Context, logger logging.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.Datapoint { +func (c client) GetMetricStatistics(ctx context.Context, logger *slog.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.Datapoint { filter := createGetMetricStatisticsInput(dimensions, &namespace, metric, logger) - if c.logger.IsDebugEnabled() { - c.logger.Debug("GetMetricStatistics", "input", filter) - } + c.logger.Debug("GetMetricStatistics", "input", filter) resp, err := c.cloudwatchAPI.GetMetricStatisticsWithContext(ctx, filter) - if c.logger.IsDebugEnabled() { - c.logger.Debug("GetMetricStatistics", "output", resp) - } + c.logger.Debug("GetMetricStatistics", "output", resp) promutil.CloudwatchGetMetricStatisticsAPICounter.Inc() promutil.CloudwatchAPICounter.WithLabelValues("GetMetricStatistics").Inc() if err != nil { promutil.CloudwatchAPIErrorCounter.WithLabelValues("GetMetricStatistics").Inc() - c.logger.Error(err, "Failed to get metric statistics") + c.logger.Error("Failed to get metric statistics", "err", err) return nil } diff 
--git a/pkg/clients/cloudwatch/v1/input.go b/pkg/clients/cloudwatch/v1/input.go index b317ff5b7..309c03dbe 100644 --- a/pkg/clients/cloudwatch/v1/input.go +++ b/pkg/clients/cloudwatch/v1/input.go @@ -13,6 +13,7 @@ package v1 import ( + "log/slog" "strconv" "strings" "time" @@ -20,7 +21,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil" ) @@ -38,7 +38,7 @@ func toCloudWatchDimensions(dimensions []model.Dimension) []*cloudwatch.Dimensio return cwDim } -func createGetMetricStatisticsInput(dimensions []model.Dimension, namespace *string, metric *model.MetricConfig, logger logging.Logger) *cloudwatch.GetMetricStatisticsInput { +func createGetMetricStatisticsInput(dimensions []model.Dimension, namespace *string, metric *model.MetricConfig, logger *slog.Logger) *cloudwatch.GetMetricStatisticsInput { period := metric.Period length := metric.Length delay := metric.Delay @@ -66,17 +66,15 @@ func createGetMetricStatisticsInput(dimensions []model.Dimension, namespace *str ExtendedStatistics: extendedStatistics, } - if logger.IsDebugEnabled() { - logger.Debug("CLI helper - " + - "aws cloudwatch get-metric-statistics" + - " --metric-name " + metric.Name + - " --dimensions " + dimensionsToCliString(dimensions) + - " --namespace " + *namespace + - " --statistics " + *statistics[0] + - " --period " + strconv.FormatInt(period, 10) + - " --start-time " + startTime.Format(time.RFC3339) + - " --end-time " + endTime.Format(time.RFC3339)) - } + logger.Debug("CLI helper - " + + "aws cloudwatch get-metric-statistics" + + " --metric-name " + metric.Name + + " --dimensions " + dimensionsToCliString(dimensions) + + " --namespace " + *namespace + + " --statistics " + *statistics[0] + + " --period " + 
strconv.FormatInt(period, 10) + + " --start-time " + startTime.Format(time.RFC3339) + + " --end-time " + endTime.Format(time.RFC3339)) return output } diff --git a/pkg/clients/cloudwatch/v2/client.go b/pkg/clients/cloudwatch/v2/client.go index b47d13fad..92a263b3a 100644 --- a/pkg/clients/cloudwatch/v2/client.go +++ b/pkg/clients/cloudwatch/v2/client.go @@ -14,6 +14,7 @@ package v2 import ( "context" + "log/slog" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -21,17 +22,16 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" cloudwatch_client "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil" ) type client struct { - logger logging.Logger + logger *slog.Logger cloudwatchAPI *cloudwatch.Client } -func NewClient(logger logging.Logger, cloudwatchAPI *cloudwatch.Client) cloudwatch_client.Client { +func NewClient(logger *slog.Logger, cloudwatchAPI *cloudwatch.Client) cloudwatch_client.Client { return &client{ logger: logger, cloudwatchAPI: cloudwatchAPI, @@ -47,9 +47,7 @@ func (c client) ListMetrics(ctx context.Context, namespace string, metric *model filter.RecentlyActive = types.RecentlyActivePt3h } - if c.logger.IsDebugEnabled() { - c.logger.Debug("ListMetrics", "input", filter) - } + c.logger.Debug("ListMetrics", "input", filter) paginator := cloudwatch.NewListMetricsPaginator(c.cloudwatchAPI, filter, func(options *cloudwatch.ListMetricsPaginatorOptions) { options.StopOnDuplicateToken = true @@ -60,14 +58,12 @@ func (c client) ListMetrics(ctx context.Context, namespace string, metric *model page, err := paginator.NextPage(ctx) if err != nil { promutil.CloudwatchAPIErrorCounter.WithLabelValues("ListMetrics").Inc() - c.logger.Error(err, "ListMetrics error") + c.logger.Error("ListMetrics 
error", "err", err) return err } metricsPage := toModelMetric(page) - if c.logger.IsDebugEnabled() { - c.logger.Debug("ListMetrics", "output", metricsPage) - } + c.logger.Debug("ListMetrics", "output", metricsPage) fn(metricsPage) } @@ -127,9 +123,7 @@ func (c client) GetMetricData(ctx context.Context, getMetricData []*model.Cloudw } var resp cloudwatch.GetMetricDataOutput promutil.CloudwatchGetMetricDataAPIMetricsCounter.Add(float64(len(input.MetricDataQueries))) - if c.logger.IsDebugEnabled() { - c.logger.Debug("GetMetricData", "input", input) - } + c.logger.Debug("GetMetricData", "input", input) paginator := cloudwatch.NewGetMetricDataPaginator(c.cloudwatchAPI, input, func(options *cloudwatch.GetMetricDataPaginatorOptions) { options.StopOnDuplicateToken = true @@ -141,15 +135,13 @@ func (c client) GetMetricData(ctx context.Context, getMetricData []*model.Cloudw page, err := paginator.NextPage(ctx) if err != nil { promutil.CloudwatchAPIErrorCounter.WithLabelValues("GetMetricData").Inc() - c.logger.Error(err, "GetMetricData error") + c.logger.Error("GetMetricData error", "err", err) return nil } resp.MetricDataResults = append(resp.MetricDataResults, page.MetricDataResults...) 
} - if c.logger.IsDebugEnabled() { - c.logger.Debug("GetMetricData", "output", resp) - } + c.logger.Debug("GetMetricData", "output", resp) return toMetricDataResult(resp) } @@ -167,24 +159,20 @@ func toMetricDataResult(resp cloudwatch.GetMetricDataOutput) []cloudwatch_client return output } -func (c client) GetMetricStatistics(ctx context.Context, logger logging.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.Datapoint { +func (c client) GetMetricStatistics(ctx context.Context, logger *slog.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.Datapoint { filter := createGetMetricStatisticsInput(logger, dimensions, &namespace, metric) - if c.logger.IsDebugEnabled() { - c.logger.Debug("GetMetricStatistics", "input", filter) - } + c.logger.Debug("GetMetricStatistics", "input", filter) resp, err := c.cloudwatchAPI.GetMetricStatistics(ctx, filter) - if c.logger.IsDebugEnabled() { - c.logger.Debug("GetMetricStatistics", "output", resp) - } + c.logger.Debug("GetMetricStatistics", "output", resp) promutil.CloudwatchAPICounter.WithLabelValues("GetMetricStatistics").Inc() promutil.CloudwatchGetMetricStatisticsAPICounter.Inc() if err != nil { promutil.CloudwatchAPIErrorCounter.WithLabelValues("GetMetricStatistics").Inc() - c.logger.Error(err, "Failed to get metric statistics") + c.logger.Error("Failed to get metric statistics", "err", err) return nil } diff --git a/pkg/clients/cloudwatch/v2/input.go b/pkg/clients/cloudwatch/v2/input.go index 15b1a3410..5c27fb9fb 100644 --- a/pkg/clients/cloudwatch/v2/input.go +++ b/pkg/clients/cloudwatch/v2/input.go @@ -13,6 +13,7 @@ package v2 import ( + "log/slog" "strconv" "strings" "time" @@ -21,7 +22,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" 
"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil" ) @@ -39,7 +39,7 @@ func toCloudWatchDimensions(dimensions []model.Dimension) []types.Dimension { return cwDim } -func createGetMetricStatisticsInput(logger logging.Logger, dimensions []model.Dimension, namespace *string, metric *model.MetricConfig) *cloudwatch.GetMetricStatisticsInput { +func createGetMetricStatisticsInput(logger *slog.Logger, dimensions []model.Dimension, namespace *string, metric *model.MetricConfig) *cloudwatch.GetMetricStatisticsInput { period := metric.Period length := metric.Length delay := metric.Delay @@ -67,19 +67,17 @@ func createGetMetricStatisticsInput(logger logging.Logger, dimensions []model.Di ExtendedStatistics: extendedStatistics, } - if logger.IsDebugEnabled() { - logger.Debug("CLI helper - " + - "aws cloudwatch get-metric-statistics" + - " --metric-name " + metric.Name + - " --dimensions " + dimensionsToCliString(dimensions) + - " --namespace " + *namespace + - " --statistics " + string(statistics[0]) + - " --period " + strconv.FormatInt(period, 10) + - " --start-time " + startTime.Format(time.RFC3339) + - " --end-time " + endTime.Format(time.RFC3339)) + logger.Debug("CLI helper - " + + "aws cloudwatch get-metric-statistics" + + " --metric-name " + metric.Name + + " --dimensions " + dimensionsToCliString(dimensions) + + " --namespace " + *namespace + + " --statistics " + string(statistics[0]) + + " --period " + strconv.FormatInt(period, 10) + + " --start-time " + startTime.Format(time.RFC3339) + + " --end-time " + endTime.Format(time.RFC3339)) - logger.Debug("createGetMetricStatisticsInput", "output", *output) - } + logger.Debug("createGetMetricStatisticsInput", "output", *output) return output } diff --git a/pkg/clients/tagging/v1/client.go b/pkg/clients/tagging/v1/client.go index 9e997b77b..4b8019da4 100644 --- a/pkg/clients/tagging/v1/client.go +++ 
b/pkg/clients/tagging/v1/client.go @@ -15,6 +15,7 @@ package v1 import ( "context" "fmt" + "log/slog" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface" @@ -30,13 +31,12 @@ import ( "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil" ) type client struct { - logger logging.Logger + logger *slog.Logger taggingAPI resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI autoscalingAPI autoscalingiface.AutoScalingAPI apiGatewayAPI apigatewayiface.APIGatewayAPI @@ -49,7 +49,7 @@ type client struct { } func NewClient( - logger logging.Logger, + logger *slog.Logger, taggingAPI resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI, autoscalingAPI autoscalingiface.AutoScalingAPI, apiGatewayAPI apigatewayiface.APIGatewayAPI, diff --git a/pkg/clients/tagging/v2/client.go b/pkg/clients/tagging/v2/client.go index 54bacf7d2..1e1f26edf 100644 --- a/pkg/clients/tagging/v2/client.go +++ b/pkg/clients/tagging/v2/client.go @@ -15,6 +15,7 @@ package v2 import ( "context" "fmt" + "log/slog" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/amp" @@ -30,13 +31,12 @@ import ( "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil" ) type client struct { - logger logging.Logger + logger *slog.Logger taggingAPI *resourcegroupstaggingapi.Client 
autoscalingAPI *autoscaling.Client apiGatewayAPI *apigateway.Client @@ -49,7 +49,7 @@ type client struct { } func NewClient( - logger logging.Logger, + logger *slog.Logger, taggingAPI *resourcegroupstaggingapi.Client, autoscalingAPI *autoscaling.Client, apiGatewayAPI *apigateway.Client, diff --git a/pkg/clients/v1/factory.go b/pkg/clients/v1/factory.go index dfa782348..75d03698b 100644 --- a/pkg/clients/v1/factory.go +++ b/pkg/clients/v1/factory.go @@ -13,6 +13,8 @@ package v1 import ( + "context" + "log/slog" "os" "sync" "time" @@ -52,7 +54,6 @@ import ( cloudwatch_v1 "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch/v1" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging" tagging_v1 "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging/v1" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -67,7 +68,7 @@ type CachingFactory struct { refreshed bool mu sync.Mutex fips bool - logger logging.Logger + logger *slog.Logger } type cachedClients struct { @@ -84,7 +85,7 @@ type cachedClients struct { var _ clients.Factory = &CachingFactory{} // NewFactory creates a new client factory to use when fetching data from AWS with sdk v1 -func NewFactory(logger logging.Logger, jobsCfg model.JobsConfig, fips bool) *CachingFactory { +func NewFactory(logger *slog.Logger, jobsCfg model.JobsConfig, fips bool) *CachingFactory { stscache := map[model.Role]stsiface.STSAPI{} iamcache := map[model.Role]iamiface.IAMAPI{} cache := map[model.Role]map[string]*cachedClients{} @@ -221,15 +222,15 @@ func (c *CachingFactory) Refresh() { // sessions really only need to be constructed once at runtime if c.session == nil { - c.session = createAWSSession(c.endpointResolver, c.logger.IsDebugEnabled()) + c.session = createAWSSession(c.endpointResolver, c.logger) } for role := range c.stscache { - 
c.stscache[role] = createStsSession(c.session, role, c.stsRegion, c.fips, c.logger.IsDebugEnabled()) + c.stscache[role] = createStsSession(c.session, role, c.stsRegion, c.fips, c.logger) } for role := range c.iamcache { - c.iamcache[role] = createIamSession(c.session, role, c.fips, c.logger.IsDebugEnabled()) + c.iamcache[role] = createIamSession(c.session, role, c.fips, c.logger) } for role, regions := range c.clients { @@ -251,32 +252,32 @@ func (c *CachingFactory) Refresh() { c.refreshed = true } -func createCloudWatchClient(logger logging.Logger, s *session.Session, region *string, role model.Role, fips bool) cloudwatch_client.Client { +func createCloudWatchClient(logger *slog.Logger, s *session.Session, region *string, role model.Role, fips bool) cloudwatch_client.Client { return cloudwatch_v1.NewClient( logger, - createCloudwatchSession(s, region, role, fips, logger.IsDebugEnabled()), + createCloudwatchSession(s, region, role, fips, logger), ) } -func createTaggingClient(logger logging.Logger, session *session.Session, region *string, role model.Role, fips bool) tagging.Client { +func createTaggingClient(logger *slog.Logger, session *session.Session, region *string, role model.Role, fips bool) tagging.Client { // The createSession function for a service which does not support FIPS does not take a fips parameter // This currently applies to createTagSession(Resource Groups Tagging), ASG (EC2 autoscaling), and Prometheus (Amazon Managed Prometheus) // AWS FIPS Reference: https://aws.amazon.com/compliance/fips/ return tagging_v1.NewClient( logger, - createTagSession(session, region, role, logger.IsDebugEnabled()), - createASGSession(session, region, role, logger.IsDebugEnabled()), - createAPIGatewaySession(session, region, role, fips, logger.IsDebugEnabled()), - createAPIGatewayV2Session(session, region, role, fips, logger.IsDebugEnabled()), - createEC2Session(session, region, role, fips, logger.IsDebugEnabled()), - createDMSSession(session, region, role, fips, 
logger.IsDebugEnabled()), - createPrometheusSession(session, region, role, logger.IsDebugEnabled()), - createStorageGatewaySession(session, region, role, fips, logger.IsDebugEnabled()), - createShieldSession(session, region, role, fips, logger.IsDebugEnabled()), + createTagSession(session, region, role, logger), + createASGSession(session, region, role, logger), + createAPIGatewaySession(session, region, role, fips, logger), + createAPIGatewayV2Session(session, region, role, fips, logger), + createEC2Session(session, region, role, fips, logger), + createDMSSession(session, region, role, fips, logger), + createPrometheusSession(session, region, role, logger), + createStorageGatewaySession(session, region, role, fips, logger), + createShieldSession(session, region, role, fips, logger), ) } -func createAccountClient(logger logging.Logger, sts stsiface.STSAPI, iam iamiface.IAMAPI) account.Client { +func createAccountClient(logger *slog.Logger, sts stsiface.STSAPI, iam iamiface.IAMAPI) account.Client { return account_v1.NewClient(logger, sts, iam) } @@ -347,13 +348,13 @@ func getAwsRetryer() aws.RequestRetryer { } } -func createAWSSession(resolver endpoints.ResolverFunc, isDebugEnabled bool) *session.Session { +func createAWSSession(resolver endpoints.ResolverFunc, logger *slog.Logger) *session.Session { config := aws.Config{ CredentialsChainVerboseErrors: aws.Bool(true), EndpointResolver: resolver, } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } @@ -364,7 +365,7 @@ func createAWSSession(resolver endpoints.ResolverFunc, isDebugEnabled bool) *ses return sess } -func createStsSession(sess *session.Session, role model.Role, region string, fips bool, isDebugEnabled bool) *sts.STS { +func createStsSession(sess *session.Session, role model.Role, region string, fips bool, logger *slog.Logger) *sts.STS { maxStsRetries := 5 config := &aws.Config{MaxRetries: 
&maxStsRetries} @@ -376,14 +377,14 @@ func createStsSession(sess *session.Session, role model.Role, region string, fip config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return sts.New(sess, setSTSCreds(sess, config, role)) } -func createIamSession(sess *session.Session, role model.Role, fips bool, isDebugEnabled bool) *iam.IAM { +func createIamSession(sess *session.Session, role model.Role, fips bool, logger *slog.Logger) *iam.IAM { maxStsRetries := 5 config := &aws.Config{MaxRetries: &maxStsRetries} @@ -391,28 +392,28 @@ func createIamSession(sess *session.Session, role model.Role, fips bool, isDebug config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return iam.New(sess, setSTSCreds(sess, config, role)) } -func createCloudwatchSession(sess *session.Session, region *string, role model.Role, fips bool, isDebugEnabled bool) *cloudwatch.CloudWatch { +func createCloudwatchSession(sess *session.Session, region *string, role model.Role, fips bool, logger *slog.Logger) *cloudwatch.CloudWatch { config := &aws.Config{Region: region, Retryer: getAwsRetryer()} if fips { config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return cloudwatch.New(sess, setSTSCreds(sess, config, role)) } -func createTagSession(sess *session.Session, region *string, role model.Role, isDebugEnabled bool) *resourcegroupstaggingapi.ResourceGroupsTaggingAPI { +func createTagSession(sess *session.Session, region *string, role model.Role, logger *slog.Logger) 
*resourcegroupstaggingapi.ResourceGroupsTaggingAPI { maxResourceGroupTaggingRetries := 5 config := &aws.Config{ Region: region, @@ -420,53 +421,53 @@ func createTagSession(sess *session.Session, region *string, role model.Role, is CredentialsChainVerboseErrors: aws.Bool(true), } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return resourcegroupstaggingapi.New(sess, setSTSCreds(sess, config, role)) } -func createAPIGatewaySession(sess *session.Session, region *string, role model.Role, fips bool, isDebugEnabled bool) apigatewayiface.APIGatewayAPI { +func createAPIGatewaySession(sess *session.Session, region *string, role model.Role, fips bool, logger *slog.Logger) apigatewayiface.APIGatewayAPI { maxAPIGatewayAPIRetries := 5 config := &aws.Config{Region: region, MaxRetries: &maxAPIGatewayAPIRetries} if fips { config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return apigateway.New(sess, setSTSCreds(sess, config, role)) } -func createAPIGatewayV2Session(sess *session.Session, region *string, role model.Role, fips bool, isDebugEnabled bool) apigatewayv2iface.ApiGatewayV2API { +func createAPIGatewayV2Session(sess *session.Session, region *string, role model.Role, fips bool, logger *slog.Logger) apigatewayv2iface.ApiGatewayV2API { maxAPIGatewayAPIRetries := 5 config := &aws.Config{Region: region, MaxRetries: &maxAPIGatewayAPIRetries} if fips { config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return apigatewayv2.New(sess, setSTSCreds(sess, config, role)) } -func createASGSession(sess *session.Session, region *string, role 
model.Role, isDebugEnabled bool) autoscalingiface.AutoScalingAPI { +func createASGSession(sess *session.Session, region *string, role model.Role, logger *slog.Logger) autoscalingiface.AutoScalingAPI { maxAutoScalingAPIRetries := 5 config := &aws.Config{Region: region, MaxRetries: &maxAutoScalingAPIRetries} - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return autoscaling.New(sess, setSTSCreds(sess, config, role)) } -func createStorageGatewaySession(sess *session.Session, region *string, role model.Role, fips bool, isDebugEnabled bool) storagegatewayiface.StorageGatewayAPI { +func createStorageGatewaySession(sess *session.Session, region *string, role model.Role, fips bool, logger *slog.Logger) storagegatewayiface.StorageGatewayAPI { maxStorageGatewayAPIRetries := 5 config := &aws.Config{Region: region, MaxRetries: &maxStorageGatewayAPIRetries} @@ -474,60 +475,60 @@ func createStorageGatewaySession(sess *session.Session, region *string, role mod config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return storagegateway.New(sess, setSTSCreds(sess, config, role)) } -func createEC2Session(sess *session.Session, region *string, role model.Role, fips bool, isDebugEnabled bool) ec2iface.EC2API { +func createEC2Session(sess *session.Session, region *string, role model.Role, fips bool, logger *slog.Logger) ec2iface.EC2API { maxEC2APIRetries := 10 config := &aws.Config{Region: region, MaxRetries: &maxEC2APIRetries} if fips { config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return ec2.New(sess, setSTSCreds(sess, config, role)) } 
-func createPrometheusSession(sess *session.Session, region *string, role model.Role, isDebugEnabled bool) prometheusserviceiface.PrometheusServiceAPI { +func createPrometheusSession(sess *session.Session, region *string, role model.Role, logger *slog.Logger) prometheusserviceiface.PrometheusServiceAPI { maxPrometheusAPIRetries := 10 config := &aws.Config{Region: region, MaxRetries: &maxPrometheusAPIRetries} - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return prometheusservice.New(sess, setSTSCreds(sess, config, role)) } -func createDMSSession(sess *session.Session, region *string, role model.Role, fips bool, isDebugEnabled bool) databasemigrationserviceiface.DatabaseMigrationServiceAPI { +func createDMSSession(sess *session.Session, region *string, role model.Role, fips bool, logger *slog.Logger) databasemigrationserviceiface.DatabaseMigrationServiceAPI { maxDMSAPIRetries := 5 config := &aws.Config{Region: region, MaxRetries: &maxDMSAPIRetries} if fips { config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } return databasemigrationservice.New(sess, setSTSCreds(sess, config, role)) } -func createShieldSession(sess *session.Session, region *string, role model.Role, fips bool, isDebugEnabled bool) shieldiface.ShieldAPI { +func createShieldSession(sess *session.Session, region *string, role model.Role, fips bool, logger *slog.Logger) shieldiface.ShieldAPI { maxShieldAPIRetries := 5 config := &aws.Config{Region: region, MaxRetries: &maxShieldAPIRetries} if fips { config.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled } - if isDebugEnabled { + if logger != nil && logger.Enabled(context.Background(), slog.LevelDebug) { config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) } diff --git 
a/pkg/clients/v1/factory_test.go b/pkg/clients/v1/factory_test.go index 1a206e8cd..b36ebcd52 100644 --- a/pkg/clients/v1/factory_test.go +++ b/pkg/clients/v1/factory_test.go @@ -23,10 +23,10 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/awstesting/mock" "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -82,7 +82,7 @@ func TestNewClientCache(t *testing.T) { "an empty config gives an empty cache", model.JobsConfig{}, false, - &CachingFactory{logger: logging.NewNopLogger()}, + &CachingFactory{logger: promslog.NewNopLogger()}, }, { "if fips is set then the clients has fips", @@ -90,7 +90,7 @@ func TestNewClientCache(t *testing.T) { true, &CachingFactory{ fips: true, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, }, { @@ -153,7 +153,7 @@ func TestNewClientCache(t *testing.T) { "ap-northeast-3": &cachedClients{}, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, }, { @@ -239,7 +239,7 @@ func TestNewClientCache(t *testing.T) { "ap-northeast-1": &cachedClients{onlyStatic: true}, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, }, { @@ -362,7 +362,7 @@ func TestNewClientCache(t *testing.T) { "ap-northeast-3": &cachedClients{}, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, }, { @@ -451,7 +451,7 @@ func TestNewClientCache(t *testing.T) { "ap-northeast-1": &cachedClients{onlyStatic: true}, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, }, } @@ -460,7 +460,7 @@ func TestNewClientCache(t *testing.T) { test := l t.Run(test.descrip, func(t *testing.T) { t.Parallel() - cache := 
NewFactory(logging.NewNopLogger(), test.jobsCfg, test.fips) + cache := NewFactory(promslog.NewNopLogger(), test.jobsCfg, test.fips) t.Logf("the cache is: %v", cache) if test.cache.cleared != cache.cleared { @@ -505,14 +505,14 @@ func TestClear(t *testing.T) { clients: map[model.Role]map[string]*cachedClients{ {}: { "us-east-1": &cachedClients{ - cloudwatch: createCloudWatchClient(logging.NewNopLogger(), mock.Session, ®ion, role, false), - tagging: createTaggingClient(logging.NewNopLogger(), mock.Session, ®ion, role, false), - account: createAccountClient(logging.NewNopLogger(), nil, nil), + cloudwatch: createCloudWatchClient(promslog.NewNopLogger(), mock.Session, ®ion, role, false), + tagging: createTaggingClient(promslog.NewNopLogger(), mock.Session, ®ion, role, false), + account: createAccountClient(promslog.NewNopLogger(), nil, nil), onlyStatic: true, }, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, }, { @@ -533,7 +533,7 @@ func TestClear(t *testing.T) { }, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, }, } @@ -605,7 +605,7 @@ func TestRefresh(t *testing.T) { }, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, false, }, @@ -628,7 +628,7 @@ func TestRefresh(t *testing.T) { }, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, true, }, @@ -639,18 +639,18 @@ func TestRefresh(t *testing.T) { mu: sync.Mutex{}, session: mock.Session, stscache: map[model.Role]stsiface.STSAPI{ - {}: createStsSession(mock.Session, role, "", false, false), + {}: createStsSession(mock.Session, role, "", false, nil), }, clients: map[model.Role]map[string]*cachedClients{ {}: { "us-east-1": &cachedClients{ - cloudwatch: createCloudWatchClient(logging.NewNopLogger(), mock.Session, ®ion, role, false), - tagging: createTaggingClient(logging.NewNopLogger(), mock.Session, ®ion, role, false), - account: createAccountClient(logging.NewNopLogger(), createStsSession(mock.Session, 
role, "", false, false), createIamSession(mock.Session, role, false, false)), + cloudwatch: createCloudWatchClient(promslog.NewNopLogger(), mock.Session, ®ion, role, false), + tagging: createTaggingClient(promslog.NewNopLogger(), mock.Session, ®ion, role, false), + account: createAccountClient(promslog.NewNopLogger(), createStsSession(mock.Session, role, "", false, nil), createIamSession(mock.Session, role, false, nil)), }, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, false, }, @@ -762,13 +762,13 @@ func testGetAWSClient( clients: map[model.Role]map[string]*cachedClients{ {}: { "us-east-1": &cachedClients{ - cloudwatch: createCloudWatchClient(logging.NewNopLogger(), mock.Session, ®ion, role, false), - tagging: createTaggingClient(logging.NewNopLogger(), mock.Session, ®ion, role, false), - account: createAccountClient(logging.NewNopLogger(), createStsSession(mock.Session, role, "", false, false), createIamSession(mock.Session, role, false, false)), + cloudwatch: createCloudWatchClient(promslog.NewNopLogger(), mock.Session, ®ion, role, false), + tagging: createTaggingClient(promslog.NewNopLogger(), mock.Session, ®ion, role, false), + account: createAccountClient(promslog.NewNopLogger(), createStsSession(mock.Session, role, "", false, nil), createIamSession(mock.Session, role, false, nil)), }, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, true, }, @@ -784,13 +784,13 @@ func testGetAWSClient( clients: map[model.Role]map[string]*cachedClients{ {}: { "us-east-1": &cachedClients{ - cloudwatch: createCloudWatchClient(logging.NewNopLogger(), mock.Session, ®ion, role, false), - tagging: createTaggingClient(logging.NewNopLogger(), mock.Session, ®ion, role, false), - account: createAccountClient(logging.NewNopLogger(), createStsSession(mock.Session, role, "", false, false), createIamSession(mock.Session, role, false, false)), + cloudwatch: createCloudWatchClient(promslog.NewNopLogger(), mock.Session, ®ion, role, 
false), + tagging: createTaggingClient(promslog.NewNopLogger(), mock.Session, ®ion, role, false), + account: createAccountClient(promslog.NewNopLogger(), createStsSession(mock.Session, role, "", false, nil), createIamSession(mock.Session, role, false, nil)), }, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, false, }, @@ -808,7 +808,7 @@ func testGetAWSClient( "us-east-1": &cachedClients{}, }, }, - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), }, false, }, @@ -925,7 +925,7 @@ func TestCreateAWSSession(t *testing.T) { for _, l := range tests { test := l t.Run(test.descrip, func(t *testing.T) { - s := createAWSSession(endpoints.DefaultResolver().EndpointFor, false) + s := createAWSSession(endpoints.DefaultResolver().EndpointFor, nil) if s == nil { t.Fail() } @@ -978,7 +978,7 @@ func TestCreateStsSession(t *testing.T) { t.Run(test.descrip, func(t *testing.T) { t.Parallel() // just exercise the code path - iface := createStsSession(mock.Session, test.role, test.stsRegion, false, false) + iface := createStsSession(mock.Session, test.role, test.stsRegion, false, nil) if iface == nil { t.Fail() } @@ -991,7 +991,7 @@ func TestCreateCloudwatchSession(t *testing.T) { t, "Cloudwatch", func(t *testing.T, s *session.Session, region *string, role model.Role, fips bool) { - iface := createCloudwatchSession(s, region, role, fips, false) + iface := createCloudwatchSession(s, region, role, fips, nil) if iface == nil { t.Fail() } @@ -1002,8 +1002,8 @@ func TestCreateTagSession(t *testing.T) { testAWSClient( t, "Tag", - func(t *testing.T, s *session.Session, region *string, role model.Role, fips bool) { - iface := createTagSession(s, region, role, fips) + func(t *testing.T, s *session.Session, region *string, role model.Role, _ bool) { + iface := createTagSession(s, region, role, promslog.NewNopLogger()) if iface == nil { t.Fail() } @@ -1015,7 +1015,7 @@ func TestCreateAPIGatewaySession(t *testing.T) { t, "APIGateway", func(t 
*testing.T, s *session.Session, region *string, role model.Role, fips bool) { - iface := createAPIGatewaySession(s, region, role, fips, false) + iface := createAPIGatewaySession(s, region, role, fips, promslog.NewNopLogger()) if iface == nil { t.Fail() } @@ -1027,7 +1027,7 @@ func TestCreateAPIGatewayV2Session(t *testing.T) { t, "APIGatewayV2", func(t *testing.T, s *session.Session, region *string, role model.Role, fips bool) { - iface := createAPIGatewayV2Session(s, region, role, fips, false) + iface := createAPIGatewayV2Session(s, region, role, fips, promslog.NewNopLogger()) if iface == nil { t.Fail() } @@ -1038,8 +1038,8 @@ func TestCreateASGSession(t *testing.T) { testAWSClient( t, "ASG", - func(t *testing.T, s *session.Session, region *string, role model.Role, fips bool) { - iface := createASGSession(s, region, role, fips) + func(t *testing.T, s *session.Session, region *string, role model.Role, _ bool) { + iface := createASGSession(s, region, role, promslog.NewNopLogger()) if iface == nil { t.Fail() } @@ -1051,7 +1051,7 @@ func TestCreateEC2Session(t *testing.T) { t, "EC2", func(t *testing.T, s *session.Session, region *string, role model.Role, fips bool) { - iface := createEC2Session(s, region, role, fips, false) + iface := createEC2Session(s, region, role, fips, promslog.NewNopLogger()) if iface == nil { t.Fail() } @@ -1063,7 +1063,7 @@ func TestCreatePrometheusSession(t *testing.T) { t, "Prometheus", func(t *testing.T, s *session.Session, region *string, role model.Role, _ bool) { - iface := createPrometheusSession(s, region, role, false) + iface := createPrometheusSession(s, region, role, promslog.NewNopLogger()) if iface == nil { t.Fail() } @@ -1075,7 +1075,7 @@ func TestCreateDMSSession(t *testing.T) { t, "DMS", func(t *testing.T, s *session.Session, region *string, role model.Role, fips bool) { - iface := createDMSSession(s, region, role, fips, false) + iface := createDMSSession(s, region, role, fips, promslog.NewNopLogger()) if iface == nil { t.Fail() 
} @@ -1087,7 +1087,7 @@ func TestCreateStorageGatewaySession(t *testing.T) { t, "StorageGateway", func(t *testing.T, s *session.Session, region *string, role model.Role, fips bool) { - iface := createStorageGatewaySession(s, region, role, fips, false) + iface := createStorageGatewaySession(s, region, role, fips, promslog.NewNopLogger()) if iface == nil { t.Fail() } @@ -1140,7 +1140,7 @@ func TestSTSResolvesFIPSEnabledEndpoints(t *testing.T) { mockSession.Config.Endpoint = nil - sess := createStsSession(mock.Session, model.Role{}, tc.region, true, false) + sess := createStsSession(mock.Session, model.Role{}, tc.region, true, promslog.NewNopLogger()) require.NotNil(t, sess) require.True(t, called, "expected endpoint resolver to be called") diff --git a/pkg/clients/v2/factory.go b/pkg/clients/v2/factory.go index 745e1a8ef..ef5f01a3c 100644 --- a/pkg/clients/v2/factory.go +++ b/pkg/clients/v2/factory.go @@ -15,6 +15,7 @@ package v2 import ( "context" "fmt" + "log/slog" "os" "sync" "time" @@ -44,14 +45,13 @@ import ( cloudwatch_v2 "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch/v2" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging" tagging_v2 "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging/v2" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) type awsRegion = string type CachingFactory struct { - logger logging.Logger + logger *slog.Logger stsOptions func(*sts.Options) clients map[model.Role]map[awsRegion]*cachedClients mu sync.Mutex @@ -76,17 +76,17 @@ type cachedClients struct { var _ clients.Factory = &CachingFactory{} // NewFactory creates a new client factory to use when fetching data from AWS with sdk v2 -func NewFactory(logger logging.Logger, jobsCfg model.JobsConfig, fips bool) (*CachingFactory, error) { +func NewFactory(logger *slog.Logger, 
jobsCfg model.JobsConfig, fips bool) (*CachingFactory, error) { var options []func(*aws_config.LoadOptions) error options = append(options, aws_config.WithLogger(aws_logging.LoggerFunc(func(classification aws_logging.Classification, format string, v ...interface{}) { if classification == aws_logging.Debug { - if logger.IsDebugEnabled() { + if logger.Enabled(context.Background(), slog.LevelDebug) { logger.Debug(fmt.Sprintf(format, v...)) } } else if classification == aws_logging.Warn { logger.Warn(fmt.Sprintf(format, v...)) } else { // AWS logging only supports debug or warn, log everything else as error - logger.Error(fmt.Errorf("unexected aws error classification: %s", classification), fmt.Sprintf(format, v...)) + logger.Error(fmt.Sprintf(format, v...), "err", "unexpected aws error classification", "classification", classification) } }))) @@ -101,7 +101,7 @@ func NewFactory(logger logging.Logger, jobsCfg model.JobsConfig, fips bool) (*Ca return nil, fmt.Errorf("failed to load default aws config: %w", err) } - stsOptions := createStsOptions(jobsCfg.StsRegion, logger.IsDebugEnabled(), endpointURLOverride, fips) + stsOptions := createStsOptions(jobsCfg.StsRegion, logger.Enabled(context.Background(), slog.LevelDebug), endpointURLOverride, fips) cache := map[model.Role]map[awsRegion]*cachedClients{} for _, discoveryJob := range jobsCfg.DiscoveryJobs { for _, role := range discoveryJob.Roles { @@ -281,7 +281,7 @@ func (c *CachingFactory) Clear() { func (c *CachingFactory) createCloudwatchClient(regionConfig *aws.Config) *cloudwatch.Client { return cloudwatch.NewFromConfig(*regionConfig, func(options *cloudwatch.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -302,7 +302,7 @@ func (c *CachingFactory)
createTaggingClient(regionConfig *aws.Config) *resourcegroupstaggingapi.Client { return resourcegroupstaggingapi.NewFromConfig(*regionConfig, func(options *resourcegroupstaggingapi.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -316,7 +316,7 @@ func (c *CachingFactory) createTaggingClient(regionConfig *aws.Config) *resource func (c *CachingFactory) createAutoScalingClient(assumedConfig *aws.Config) *autoscaling.Client { return autoscaling.NewFromConfig(*assumedConfig, func(options *autoscaling.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -332,7 +332,7 @@ func (c *CachingFactory) createAutoScalingClient(assumedConfig *aws.Config) *aut func (c *CachingFactory) createAPIGatewayClient(assumedConfig *aws.Config) *apigateway.Client { return apigateway.NewFromConfig(*assumedConfig, func(options *apigateway.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -346,7 +346,7 @@ func (c *CachingFactory) createAPIGatewayClient(assumedConfig *aws.Config) *apig func (c *CachingFactory) createAPIGatewayV2Client(assumedConfig *aws.Config) *apigatewayv2.Client { return apigatewayv2.NewFromConfig(*assumedConfig, func(options *apigatewayv2.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -360,7 +360,7 @@ func (c *CachingFactory) 
createAPIGatewayV2Client(assumedConfig *aws.Config) *ap func (c *CachingFactory) createEC2Client(assumedConfig *aws.Config) *ec2.Client { return ec2.NewFromConfig(*assumedConfig, func(options *ec2.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -374,7 +374,7 @@ func (c *CachingFactory) createEC2Client(assumedConfig *aws.Config) *ec2.Client func (c *CachingFactory) createDMSClient(assumedConfig *aws.Config) *databasemigrationservice.Client { return databasemigrationservice.NewFromConfig(*assumedConfig, func(options *databasemigrationservice.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -388,7 +388,7 @@ func (c *CachingFactory) createDMSClient(assumedConfig *aws.Config) *databasemig func (c *CachingFactory) createStorageGatewayClient(assumedConfig *aws.Config) *storagegateway.Client { return storagegateway.NewFromConfig(*assumedConfig, func(options *storagegateway.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -402,7 +402,7 @@ func (c *CachingFactory) createStorageGatewayClient(assumedConfig *aws.Config) * func (c *CachingFactory) createPrometheusClient(assumedConfig *aws.Config) *amp.Client { return amp.NewFromConfig(*assumedConfig, func(options *amp.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { @@ -424,7 +424,7 @@ func (c 
*CachingFactory) createIAMClient(awsConfig *aws.Config) *iam.Client { func (c *CachingFactory) createShieldClient(awsConfig *aws.Config) *shield.Client { return shield.NewFromConfig(*awsConfig, func(options *shield.Options) { - if c.logger.IsDebugEnabled() { + if c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) { options.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody } if c.endpointURLOverride != "" { diff --git a/pkg/clients/v2/factory_test.go b/pkg/clients/v2/factory_test.go index 795accd59..56e9d5df8 100644 --- a/pkg/clients/v2/factory_test.go +++ b/pkg/clients/v2/factory_test.go @@ -14,6 +14,7 @@ package v2 import ( "context" + "log/slog" "reflect" "testing" "time" @@ -29,11 +30,11 @@ import ( "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi" "github.com/aws/aws-sdk-go-v2/service/storagegateway" "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" cloudwatch_client "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -120,7 +121,7 @@ func TestNewFactory_initializes_clients(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - output, err := NewFactory(logging.NewNopLogger(), test.jobsCfg, false) + output, err := NewFactory(promslog.NewNopLogger(), test.jobsCfg, false) require.NoError(t, err) assert.False(t, output.refreshed) @@ -162,7 +163,7 @@ func TestNewFactory_respects_stsregion(t *testing.T) { }}, } - output, err := NewFactory(logging.NewNopLogger(), cfg, false) + output, err := NewFactory(promslog.NewNopLogger(), cfg, false) require.NoError(t, err) require.Len(t, output.clients, 1) stsOptions := sts.Options{} @@ -172,7 +173,7 @@ func TestNewFactory_respects_stsregion(t 
*testing.T) { func TestCachingFactory_Clear(t *testing.T) { cache := &CachingFactory{ - logger: logging.NewNopLogger(), + logger: promslog.NewNopLogger(), clients: map[model.Role]map[awsRegion]*cachedClients{ defaultRole: { "region1": &cachedClients{ @@ -200,7 +201,7 @@ func TestCachingFactory_Clear(t *testing.T) { func TestCachingFactory_Refresh(t *testing.T) { t.Run("creates all clients when config contains only discovery jobs", func(t *testing.T) { - output, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, false) + output, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, false) require.NoError(t, err) output.Refresh() @@ -226,7 +227,7 @@ func TestCachingFactory_Refresh(t *testing.T) { }}, } - output, err := NewFactory(logging.NewNopLogger(), jobsCfg, false) + output, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false) require.NoError(t, err) output.Refresh() @@ -250,7 +251,7 @@ func TestCachingFactory_GetAccountClient(t *testing.T) { }}, } - output, err := NewFactory(logging.NewNopLogger(), jobsCfg, false) + output, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false) require.NoError(t, err) output.Refresh() @@ -268,7 +269,7 @@ func TestCachingFactory_GetAccountClient(t *testing.T) { }}, } - output, err := NewFactory(logging.NewNopLogger(), jobsCfg, false) + output, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false) require.NoError(t, err) clients := output.clients[defaultRole]["region1"] @@ -289,7 +290,7 @@ func TestCachingFactory_GetCloudwatchClient(t *testing.T) { }}, } - output, err := NewFactory(logging.NewNopLogger(), jobsCfg, false) + output, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false) require.NoError(t, err) output.Refresh() @@ -308,7 +309,7 @@ func TestCachingFactory_GetCloudwatchClient(t *testing.T) { }}, } - output, err := NewFactory(logging.NewNopLogger(), jobsCfg, false) + output, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false) require.NoError(t, 
err) clients := output.clients[defaultRole]["region1"] @@ -329,7 +330,7 @@ func TestCachingFactory_GetTaggingClient(t *testing.T) { }}, } - output, err := NewFactory(logging.NewNopLogger(), jobsCfg, false) + output, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false) require.NoError(t, err) output.Refresh() @@ -348,7 +349,7 @@ func TestCachingFactory_GetTaggingClient(t *testing.T) { }}, } - output, err := NewFactory(logging.NewNopLogger(), jobsCfg, false) + output, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false) require.NoError(t, err) clients := output.clients[defaultRole]["region1"] @@ -361,7 +362,7 @@ func TestCachingFactory_GetTaggingClient(t *testing.T) { } func TestCachingFactory_createTaggingClient_DoesNotEnableFIPS(t *testing.T) { - factory, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) + factory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) require.NoError(t, err) client := factory.createTaggingClient(factory.clients[defaultRole]["region1"].awsConfig) @@ -374,7 +375,7 @@ func TestCachingFactory_createTaggingClient_DoesNotEnableFIPS(t *testing.T) { } func TestCachingFactory_createAPIGatewayClient_EnablesFIPS(t *testing.T) { - factory, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) + factory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) require.NoError(t, err) client := factory.createAPIGatewayClient(factory.clients[defaultRole]["region1"].awsConfig) @@ -387,7 +388,7 @@ func TestCachingFactory_createAPIGatewayClient_EnablesFIPS(t *testing.T) { } func TestCachingFactory_createAPIGatewayV2Client_EnablesFIPS(t *testing.T) { - factory, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) + factory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) require.NoError(t, err) client := 
factory.createAPIGatewayV2Client(factory.clients[defaultRole]["region1"].awsConfig) @@ -400,7 +401,7 @@ func TestCachingFactory_createAPIGatewayV2Client_EnablesFIPS(t *testing.T) { } func TestCachingFactory_createAutoScalingClient_DoesNotEnableFIPS(t *testing.T) { - factory, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) + factory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) require.NoError(t, err) client := factory.createAutoScalingClient(factory.clients[defaultRole]["region1"].awsConfig) @@ -413,7 +414,7 @@ func TestCachingFactory_createAutoScalingClient_DoesNotEnableFIPS(t *testing.T) } func TestCachingFactory_createEC2Client_EnablesFIPS(t *testing.T) { - factory, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) + factory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) require.NoError(t, err) client := factory.createEC2Client(factory.clients[defaultRole]["region1"].awsConfig) @@ -426,7 +427,7 @@ func TestCachingFactory_createEC2Client_EnablesFIPS(t *testing.T) { } func TestCachingFactory_createDMSClient_EnablesFIPS(t *testing.T) { - factory, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) + factory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) require.NoError(t, err) client := factory.createDMSClient(factory.clients[defaultRole]["region1"].awsConfig) @@ -439,7 +440,7 @@ func TestCachingFactory_createDMSClient_EnablesFIPS(t *testing.T) { } func TestCachingFactory_createStorageGatewayClient_EnablesFIPS(t *testing.T) { - factory, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) + factory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) require.NoError(t, err) client := factory.createStorageGatewayClient(factory.clients[defaultRole]["region1"].awsConfig) @@ -452,7 +453,7 @@ func 
TestCachingFactory_createStorageGatewayClient_EnablesFIPS(t *testing.T) { } func TestCachingFactory_createPrometheusClient_DoesNotEnableFIPS(t *testing.T) { - factory, err := NewFactory(logging.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) + factory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true) require.NoError(t, err) client := factory.createPrometheusClient(factory.clients[defaultRole]["region1"].awsConfig) @@ -495,6 +496,6 @@ func (t testClient) GetMetricData(_ context.Context, _ []*model.CloudwatchData, return nil } -func (t testClient) GetMetricStatistics(_ context.Context, _ logging.Logger, _ []model.Dimension, _ string, _ *model.MetricConfig) []*model.Datapoint { +func (t testClient) GetMetricStatistics(_ context.Context, _ *slog.Logger, _ []model.Dimension, _ string, _ *model.MetricConfig) []*model.Datapoint { return nil } diff --git a/pkg/config/config.go b/pkg/config/config.go index 8a60377eb..d455c97e9 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -15,13 +15,13 @@ package config import ( "errors" "fmt" + "log/slog" "os" "github.com/aws/aws-sdk-go/aws" "github.com/grafana/regexp" "gopkg.in/yaml.v2" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -119,7 +119,7 @@ func (r *Role) ValidateRole(roleIdx int, parent string) error { return nil } -func (c *ScrapeConf) Load(file string, logger logging.Logger) (model.JobsConfig, error) { +func (c *ScrapeConf) Load(file string, logger *slog.Logger) (model.JobsConfig, error) { yamlFile, err := os.ReadFile(file) if err != nil { return model.JobsConfig{}, err @@ -152,7 +152,7 @@ func (c *ScrapeConf) Load(file string, logger logging.Logger) (model.JobsConfig, return c.Validate(logger) } -func (c *ScrapeConf) Validate(logger logging.Logger) (model.JobsConfig, error) { +func (c *ScrapeConf) Validate(logger *slog.Logger) (model.JobsConfig, error) { 
if c.Discovery.Jobs == nil && c.Static == nil && c.CustomNamespace == nil { return model.JobsConfig{}, fmt.Errorf("At least 1 Discovery job, 1 Static or one CustomNamespace must be defined") } @@ -212,7 +212,7 @@ func (c *ScrapeConf) Validate(logger logging.Logger) (model.JobsConfig, error) { return c.toModelConfig(), nil } -func (j *Job) validateDiscoveryJob(logger logging.Logger, jobIdx int) error { +func (j *Job) validateDiscoveryJob(logger *slog.Logger, jobIdx int) error { if j.Type != "" { if svc := SupportedServices.GetService(j.Type); svc == nil { if svc = SupportedServices.getServiceByAlias(j.Type); svc != nil { @@ -259,7 +259,7 @@ func (j *Job) validateDiscoveryJob(logger logging.Logger, jobIdx int) error { return nil } -func (j *CustomNamespace) validateCustomNamespaceJob(logger logging.Logger, jobIdx int) error { +func (j *CustomNamespace) validateCustomNamespaceJob(logger *slog.Logger, jobIdx int) error { if j.Name == "" { return fmt.Errorf("CustomNamespace job [%v]: Name should not be empty", jobIdx) } @@ -295,7 +295,7 @@ func (j *CustomNamespace) validateCustomNamespaceJob(logger logging.Logger, jobI return nil } -func (j *Static) validateStaticJob(logger logging.Logger, jobIdx int) error { +func (j *Static) validateStaticJob(logger *slog.Logger, jobIdx int) error { if j.Name == "" { return fmt.Errorf("Static job [%v]: Name should not be empty", jobIdx) } @@ -325,7 +325,7 @@ func (j *Static) validateStaticJob(logger logging.Logger, jobIdx int) error { return nil } -func (m *Metric) validateMetric(logger logging.Logger, metricIdx int, parent string, discovery *JobLevelMetricFields) error { +func (m *Metric) validateMetric(logger *slog.Logger, metricIdx int, parent string, discovery *JobLevelMetricFields) error { if m.Name == "" { return fmt.Errorf("Metric [%s/%d] in %v: Name should not be empty", m.Name, metricIdx, parent) } @@ -525,7 +525,7 @@ func toModelMetricConfig(metrics []*Metric) []*model.MetricConfig { } // logConfigErrors logs as warning any 
config unmarshalling error. -func logConfigErrors(cfg []byte, logger logging.Logger) { +func logConfigErrors(cfg []byte, logger *slog.Logger) { var sc ScrapeConf var errMsgs []string if err := yaml.UnmarshalStrict(cfg, &sc); err != nil { diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 58f503e70..87190cfc7 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -17,9 +17,8 @@ import ( "strings" "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" ) func TestConfLoad(t *testing.T) { @@ -35,7 +34,7 @@ func TestConfLoad(t *testing.T) { for _, tc := range testCases { config := ScrapeConf{} configFile := fmt.Sprintf("testdata/%s", tc.configFile) - if _, err := config.Load(configFile, logging.NewNopLogger()); err != nil { + if _, err := config.Load(configFile, promslog.NewNopLogger()); err != nil { t.Error(err) t.FailNow() } @@ -92,7 +91,7 @@ func TestBadConfigs(t *testing.T) { for _, tc := range testCases { config := ScrapeConf{} configFile := fmt.Sprintf("testdata/%s", tc.configFile) - if _, err := config.Load(configFile, logging.NewNopLogger()); err != nil { + if _, err := config.Load(configFile, promslog.NewNopLogger()); err != nil { if !strings.Contains(err.Error(), tc.errorMsg) { t.Errorf("expecter error for config file %q to contain %q but got: %s", tc.configFile, tc.errorMsg, err) t.FailNow() @@ -131,7 +130,7 @@ func TestValidateConfigFailuresWhenUsingAsLibrary(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - _, err := tc.config.Validate(logging.NewNopLogger()) + _, err := tc.config.Validate(promslog.NewNopLogger()) require.Error(t, err, "Expected config validation to fail") require.Equal(t, tc.errorMsg, err.Error()) }) diff --git a/pkg/exporter.go b/pkg/exporter.go index e8ba0a2df..9cea6b955 100644 --- a/pkg/exporter.go +++ b/pkg/exporter.go @@ -15,6 +15,7 @@ package 
exporter import ( "context" "fmt" + "log/slog" "github.com/prometheus/client_golang/prometheus" @@ -22,7 +23,6 @@ import ( "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil" ) @@ -165,7 +165,7 @@ func defaultOptions() options { // Parameters are: // - `ctx`: a context for the request // - `config`: this is the struct representation of the configuration defined in top-level configuration -// - `logger`: any implementation of the `logging.Logger` interface +// - `logger`: an *slog.Logger // - `registry`: any prometheus compatible registry where scraped AWS metrics will be written // - `factory`: any implementation of the `clients.Factory` interface // - `optFuncs`: (optional) any number of options funcs @@ -176,7 +176,7 @@ func defaultOptions() options { // track them over the lifetime of the application. 
func UpdateMetrics( ctx context.Context, - logger logging.Logger, + logger *slog.Logger, jobsCfg model.JobsConfig, registry *prometheus.Registry, factory clients.Factory, @@ -204,7 +204,7 @@ func UpdateMetrics( metrics, observedMetricLabels, err := promutil.BuildMetrics(cloudwatchData, options.labelsSnakeCase, logger) if err != nil { - logger.Error(err, "Error migrating cloudwatch metrics to prometheus metrics") + logger.Error("Error migrating cloudwatch metrics to prometheus metrics", "err", err) return nil } metrics, observedMetricLabels = promutil.BuildNamespaceInfoMetrics(tagsData, metrics, observedMetricLabels, options.labelsSnakeCase, logger) diff --git a/pkg/job/cloudwatchrunner/runner.go b/pkg/job/cloudwatchrunner/runner.go index ff3f672eb..4207fb20e 100644 --- a/pkg/job/cloudwatchrunner/runner.go +++ b/pkg/job/cloudwatchrunner/runner.go @@ -13,14 +13,15 @@ package cloudwatchrunner import ( + "log/slog" + "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/listmetrics" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/resourcemetadata" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) type ResourceEnrichment interface { - Create(logger logging.Logger) resourcemetadata.MetricResourceEnricher + Create(logger *slog.Logger) resourcemetadata.MetricResourceEnricher } type Job interface { diff --git a/pkg/job/custom.go b/pkg/job/custom.go index 9e6f47df9..4e40fbd5e 100644 --- a/pkg/job/custom.go +++ b/pkg/job/custom.go @@ -14,16 +14,16 @@ package job import ( "context" + "log/slog" "sync" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) func runCustomNamespaceJob( ctx context.Context, - logger logging.Logger, + logger 
*slog.Logger, job model.CustomNamespaceJob, clientCloudwatch cloudwatch.Client, gmdProcessor getMetricDataProcessor, @@ -37,7 +37,7 @@ func runCustomNamespaceJob( var err error cloudwatchDatas, err = gmdProcessor.Run(ctx, job.Namespace, cloudwatchDatas) if err != nil { - logger.Error(err, "Failed to get metric data") + logger.Error("Failed to get metric data", "err", err) return nil } @@ -48,7 +48,7 @@ func getMetricDataForQueriesForCustomNamespace( ctx context.Context, customNamespaceJob model.CustomNamespaceJob, clientCloudwatch cloudwatch.Client, - logger logging.Logger, + logger *slog.Logger, ) []*model.CloudwatchData { mux := &sync.Mutex{} var getMetricDatas []*model.CloudwatchData @@ -99,7 +99,7 @@ func getMetricDataForQueriesForCustomNamespace( mux.Unlock() }) if err != nil { - logger.Error(err, "Failed to get full metric list", "metric_name", metric.Name, "namespace", customNamespaceJob.Namespace) + logger.Error("Failed to get full metric list", "metric_name", metric.Name, "namespace", customNamespaceJob.Namespace, "err", err) return } }(metric) diff --git a/pkg/job/discovery.go b/pkg/job/discovery.go index 86485d6d3..3e776aff7 100644 --- a/pkg/job/discovery.go +++ b/pkg/job/discovery.go @@ -16,6 +16,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "sync" @@ -23,7 +24,6 @@ import ( "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/maxdimassociator" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -37,7 +37,7 @@ type getMetricDataProcessor interface { func runDiscoveryJob( ctx context.Context, - logger logging.Logger, + logger *slog.Logger, job model.DiscoveryJob, region string, clientTag tagging.Client, @@ -49,9 +49,9 @@ func runDiscoveryJob( 
resources, err := clientTag.GetResources(ctx, job, region) if err != nil { if errors.Is(err, tagging.ErrExpectedToFindResources) { - logger.Error(err, "No tagged resources made it through filtering") + logger.Error("No tagged resources made it through filtering", "err", err) } else { - logger.Error(err, "Couldn't describe resources") + logger.Error("Couldn't describe resources", "err", err) } return nil, nil } @@ -69,7 +69,7 @@ func runDiscoveryJob( getMetricDatas, err = gmdProcessor.Run(ctx, svc.Namespace, getMetricDatas) if err != nil { - logger.Error(err, "Failed to get metric data") + logger.Error("Failed to get metric data", "err", err) return nil, nil } @@ -78,7 +78,7 @@ func runDiscoveryJob( func getMetricDataForQueries( ctx context.Context, - logger logging.Logger, + logger *slog.Logger, discoveryJob model.DiscoveryJob, svc *config.ServiceConfig, clientCloudwatch cloudwatch.Client, @@ -113,7 +113,7 @@ func getMetricDataForQueries( mux.Unlock() }) if err != nil { - logger.Error(err, "Failed to get full metric list", "metric_name", metric.Name, "namespace", svc.Namespace) + logger.Error("Failed to get full metric list", "metric_name", metric.Name, "namespace", svc.Namespace, "err", err) return } }(metric) @@ -130,7 +130,7 @@ func (ns nopAssociator) AssociateMetricToResource(_ *model.Metric) (*model.Tagge } func getFilteredMetricDatas( - logger logging.Logger, + logger *slog.Logger, namespace string, tagsOnMetrics []string, metricsList []*model.Metric, @@ -146,13 +146,12 @@ func getFilteredMetricDatas( matchedResource, skip := assoc.AssociateMetricToResource(cwMetric) if skip { - if logger.IsDebugEnabled() { - dimensions := make([]string, 0, len(cwMetric.Dimensions)) - for _, dim := range cwMetric.Dimensions { - dimensions = append(dimensions, fmt.Sprintf("%s=%s", dim.Name, dim.Value)) - } - logger.Debug("skipping metric unmatched by associator", "metric", m.Name, "dimensions", strings.Join(dimensions, ",")) + dimensions := make([]string, 0, 
len(cwMetric.Dimensions)) + for _, dim := range cwMetric.Dimensions { + dimensions = append(dimensions, fmt.Sprintf("%s=%s", dim.Name, dim.Value)) } + logger.Debug("skipping metric unmatched by associator", "metric", m.Name, "dimensions", strings.Join(dimensions, ",")) + continue } diff --git a/pkg/job/discovery_test.go b/pkg/job/discovery_test.go index 0314b9d4d..cc96d1810 100644 --- a/pkg/job/discovery_test.go +++ b/pkg/job/discovery_test.go @@ -15,11 +15,11 @@ package job import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/assert" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/maxdimassociator" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -423,8 +423,8 @@ func Test_getFilteredMetricDatas(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - assoc := maxdimassociator.NewAssociator(logging.NewNopLogger(), tt.args.dimensionRegexps, tt.args.resources) - metricDatas := getFilteredMetricDatas(logging.NewNopLogger(), tt.args.namespace, tt.args.tagsOnMetrics, tt.args.metricsList, tt.args.dimensionNameRequirements, tt.args.m, assoc) + assoc := maxdimassociator.NewAssociator(promslog.NewNopLogger(), tt.args.dimensionRegexps, tt.args.resources) + metricDatas := getFilteredMetricDatas(promslog.NewNopLogger(), tt.args.namespace, tt.args.tagsOnMetrics, tt.args.metricsList, tt.args.dimensionNameRequirements, tt.args.m, assoc) if len(metricDatas) != len(tt.wantGetMetricsData) { t.Errorf("len(getFilteredMetricDatas()) = %v, want %v", len(metricDatas), len(tt.wantGetMetricsData)) } diff --git a/pkg/job/getmetricdata/processor.go b/pkg/job/getmetricdata/processor.go index 734a3e9a4..21c8e7f6f 100644 --- a/pkg/job/getmetricdata/processor.go +++ b/pkg/job/getmetricdata/processor.go @@ -15,6 +15,7 @@ 
package getmetricdata import ( "context" "fmt" + "log/slog" "strconv" "strings" "time" @@ -22,7 +23,6 @@ import ( "golang.org/x/sync/errgroup" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -56,15 +56,15 @@ type Processor struct { client Client concurrency int windowCalculator MetricWindowCalculator - logger logging.Logger + logger *slog.Logger factory IteratorFactory } -func NewDefaultProcessor(logger logging.Logger, client Client, metricsPerQuery int, concurrency int) Processor { +func NewDefaultProcessor(logger *slog.Logger, client Client, metricsPerQuery int, concurrency int) Processor { return NewProcessor(logger, client, concurrency, MetricWindowCalculator{clock: TimeClock{}}, &iteratorFactory{metricsPerQuery: metricsPerQuery}) } -func NewProcessor(logger logging.Logger, client Client, concurrency int, windowCalculator MetricWindowCalculator, factory IteratorFactory) Processor { +func NewProcessor(logger *slog.Logger, client Client, concurrency int, windowCalculator MetricWindowCalculator, factory IteratorFactory) Processor { return Processor{ logger: logger, client: client, @@ -88,9 +88,7 @@ func (p Processor) Run(ctx context.Context, namespace string, requests []*model. 
g.Go(func() error { batch = addQueryIDsToBatch(batch) startTime, endTime := p.windowCalculator.Calculate(toSecondDuration(batchParams.Period), toSecondDuration(batchParams.Length), toSecondDuration(batchParams.Delay)) - if p.logger.IsDebugEnabled() { - p.logger.Debug("GetMetricData Window", "start_time", startTime.Format(TimeFormat), "end_time", endTime.Format(TimeFormat)) - } + p.logger.Debug("GetMetricData Window", "start_time", startTime.Format(TimeFormat), "end_time", endTime.Format(TimeFormat)) data := p.client.GetMetricData(gCtx, batch, namespace, startTime, endTime) if data != nil { @@ -124,7 +122,7 @@ func addQueryIDsToBatch(batch []*model.CloudwatchData) []*model.CloudwatchData { return batch } -func mapResultsToBatch(logger logging.Logger, results []cloudwatch.MetricDataResult, batch []*model.CloudwatchData) { +func mapResultsToBatch(logger *slog.Logger, results []cloudwatch.MetricDataResult, batch []*model.CloudwatchData) { for _, entry := range results { id, err := queryIDToIndex(entry.ID) if err != nil { diff --git a/pkg/job/getmetricdata/processor_test.go b/pkg/job/getmetricdata/processor_test.go index ec37df2c9..3ccca6441 100644 --- a/pkg/job/getmetricdata/processor_test.go +++ b/pkg/job/getmetricdata/processor_test.go @@ -19,11 +19,11 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -200,7 +200,7 @@ func TestProcessor_Run(t *testing.T) { if tt.metricsPerBatch != 0 { metricsPerQuery = tt.metricsPerBatch } - r := NewDefaultProcessor(logging.NewNopLogger(), testClient{GetMetricDataResultForMetrics: tt.metricDataResultForMetrics}, metricsPerQuery, 1) + r := 
NewDefaultProcessor(promslog.NewNopLogger(), testClient{GetMetricDataResultForMetrics: tt.metricDataResultForMetrics}, metricsPerQuery, 1) cloudwatchData, err := r.Run(context.Background(), "anything_is_fine", ToCloudwatchData(tt.requests)) require.NoError(t, err) require.Len(t, cloudwatchData, len(tt.want)) @@ -334,7 +334,7 @@ func doBench(b *testing.B, metricsPerQuery, testResourcesCount int, concurrency for i := 0; i < testResourcesCount; i++ { datas = append(datas, getSampleMetricDatas(testResourceIDs[i])) } - r := NewDefaultProcessor(logging.NewNopLogger(), client, metricsPerQuery, concurrency) + r := NewDefaultProcessor(promslog.NewNopLogger(), client, metricsPerQuery, concurrency) // re-start timer b.ReportAllocs() b.StartTimer() diff --git a/pkg/job/maxdimassociator/associator.go b/pkg/job/maxdimassociator/associator.go index dc8bf7dcb..27729212a 100644 --- a/pkg/job/maxdimassociator/associator.go +++ b/pkg/job/maxdimassociator/associator.go @@ -15,13 +15,13 @@ package maxdimassociator import ( "cmp" "fmt" + "log/slog" "slices" "strings" "github.com/grafana/regexp" prom_model "github.com/prometheus/common/model" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -36,7 +36,7 @@ type Associator struct { // mappings is a slice of dimensions-based mappings, one for each regex of a given namespace mappings []*dimensionsRegexpMapping - logger logging.Logger + logger *slog.Logger } type dimensionsRegexpMapping struct { @@ -68,7 +68,7 @@ func (rm dimensionsRegexpMapping) toString() string { } // NewAssociator builds all mappings for the given dimensions regexps and list of resources. 
-func NewAssociator(logger logging.Logger, dimensionsRegexps []model.DimensionsRegexp, resources []*model.TaggedResource) Associator { +func NewAssociator(logger *slog.Logger, dimensionsRegexps []model.DimensionsRegexp, resources []*model.TaggedResource) Associator { assoc := Associator{ mappings: []*dimensionsRegexpMapping{}, logger: logger, @@ -114,9 +114,7 @@ func NewAssociator(logger logging.Logger, dimensionsRegexps []model.DimensionsRe // example when we define multiple regexps (to capture sibling // or sub-resources) and one of them doesn't match any resource. // This behaviour is ok, we just want to debug log to keep track of it. - if logger.IsDebugEnabled() { - logger.Debug("unable to define a regex mapping", "regex", dr.Regexp.String()) - } + logger.Debug("unable to define a regex mapping", "regex", dr.Regexp.String()) } // sort all mappings by decreasing number of dimensions names @@ -126,10 +124,8 @@ func NewAssociator(logger logging.Logger, dimensionsRegexps []model.DimensionsRe return -1 * cmp.Compare(len(a.dimensions), len(b.dimensions)) }) - if logger.IsDebugEnabled() { - for idx, regexpMapping := range assoc.mappings { - logger.Debug("associator mapping", "mapping_idx", idx, "mapping", regexpMapping.toString()) - } + for idx, regexpMapping := range assoc.mappings { + logger.Debug("associator mapping", "mapping_idx", idx, "mapping", regexpMapping.toString()) } return assoc @@ -154,9 +150,7 @@ func (assoc Associator) AssociateMetricToResource(cwMetric *model.Metric) (*mode dimensions = append(dimensions, dimension.Name) } - if logger.IsDebugEnabled() { - logger.Debug("associate loop start", "dimensions", strings.Join(dimensions, ",")) - } + logger.Debug("associate loop start", "dimensions", strings.Join(dimensions, ",")) // Attempt to find the regex mapping which contains the most // (but not necessarily all) the metric's dimensions names. 
@@ -165,9 +159,7 @@ func (assoc Associator) AssociateMetricToResource(cwMetric *model.Metric) (*mode mappingFound := false for idx, regexpMapping := range assoc.mappings { if containsAll(dimensions, regexpMapping.dimensions) { - if logger.IsDebugEnabled() { - logger.Debug("found mapping", "mapping_idx", idx, "mapping", regexpMapping.toString()) - } + logger.Debug("found mapping", "mapping_idx", idx, "mapping", regexpMapping.toString()) // A regex mapping has been found. The metric has all (and possibly more) // the dimensions computed for the mapping. Now compute a signature diff --git a/pkg/job/maxdimassociator/associator_api_gateway_test.go b/pkg/job/maxdimassociator/associator_api_gateway_test.go index 2639884bf..8ded63371 100644 --- a/pkg/job/maxdimassociator/associator_api_gateway_test.go +++ b/pkg/job/maxdimassociator/associator_api_gateway_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -146,7 +146,7 @@ func TestAssociatorAPIGateway(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_client_vpn_test.go b/pkg/job/maxdimassociator/associator_client_vpn_test.go index e1913ea20..370e89982 100644 --- a/pkg/job/maxdimassociator/associator_client_vpn_test.go +++ 
b/pkg/job/maxdimassociator/associator_client_vpn_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -62,7 +62,7 @@ func TestAssociatorClientVPN(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_ddosprotection_test.go b/pkg/job/maxdimassociator/associator_ddosprotection_test.go index fb1df3306..6e8f9667a 100644 --- a/pkg/job/maxdimassociator/associator_ddosprotection_test.go +++ b/pkg/job/maxdimassociator/associator_ddosprotection_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/assert" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -72,7 +72,7 @@ func TestAssociatorDDoSProtection(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) 
assert.Equal(t, tc.expectedSkip, skip) assert.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_dx_test.go b/pkg/job/maxdimassociator/associator_dx_test.go index 784f78059..545016592 100644 --- a/pkg/job/maxdimassociator/associator_dx_test.go +++ b/pkg/job/maxdimassociator/associator_dx_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -63,7 +63,7 @@ func TestAssociatorDX(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_ec2_test.go b/pkg/job/maxdimassociator/associator_ec2_test.go index dceafd48e..5d97609f2 100644 --- a/pkg/job/maxdimassociator/associator_ec2_test.go +++ b/pkg/job/maxdimassociator/associator_ec2_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -120,7 +120,7 @@ func TestAssociatorEC2(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), 
tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_ec_test.go b/pkg/job/maxdimassociator/associator_ec_test.go index 7f007c7b3..8cf0dfe07 100644 --- a/pkg/job/maxdimassociator/associator_ec_test.go +++ b/pkg/job/maxdimassociator/associator_ec_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -120,7 +120,7 @@ func TestAssociatorEC(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_ecs_test.go b/pkg/job/maxdimassociator/associator_ecs_test.go index ad640b1c1..2c5368054 100644 --- a/pkg/job/maxdimassociator/associator_ecs_test.go +++ b/pkg/job/maxdimassociator/associator_ecs_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" 
"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -112,7 +112,7 @@ func TestAssociatorECS(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_event_roles_test.go b/pkg/job/maxdimassociator/associator_event_roles_test.go index 6d8d511e9..069ffe86f 100644 --- a/pkg/job/maxdimassociator/associator_event_roles_test.go +++ b/pkg/job/maxdimassociator/associator_event_roles_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -72,7 +72,7 @@ func TestAssociatorEventRule(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_globalaccelerator_test.go b/pkg/job/maxdimassociator/associator_globalaccelerator_test.go index a917bc5ac..c91b683da 100644 --- a/pkg/job/maxdimassociator/associator_globalaccelerator_test.go +++ 
b/pkg/job/maxdimassociator/associator_globalaccelerator_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -113,7 +113,7 @@ func TestAssociatorGlobalAccelerator(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_gwlb_test.go b/pkg/job/maxdimassociator/associator_gwlb_test.go index e4ece1f85..d9daa7298 100644 --- a/pkg/job/maxdimassociator/associator_gwlb_test.go +++ b/pkg/job/maxdimassociator/associator_gwlb_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -111,7 +111,7 @@ func TestAssociatorGwlb(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) 
require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_ipam_test.go b/pkg/job/maxdimassociator/associator_ipam_test.go index e0993f1a6..eaed6c8e2 100644 --- a/pkg/job/maxdimassociator/associator_ipam_test.go +++ b/pkg/job/maxdimassociator/associator_ipam_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -82,7 +82,7 @@ func TestAssociatorIpam(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_kms_test.go b/pkg/job/maxdimassociator/associator_kms_test.go index 2aa04897e..d80162420 100644 --- a/pkg/job/maxdimassociator/associator_kms_test.go +++ b/pkg/job/maxdimassociator/associator_kms_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -62,7 +62,7 @@ func TestAssociatorKMS(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, 
tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_lambda_test.go b/pkg/job/maxdimassociator/associator_lambda_test.go index 8649df8cc..aaec1a194 100644 --- a/pkg/job/maxdimassociator/associator_lambda_test.go +++ b/pkg/job/maxdimassociator/associator_lambda_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -111,7 +111,7 @@ func TestAssociatorLambda(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_logs_test.go b/pkg/job/maxdimassociator/associator_logs_test.go index c9b4fa631..71d7717f8 100644 --- a/pkg/job/maxdimassociator/associator_logs_test.go +++ b/pkg/job/maxdimassociator/associator_logs_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" 
"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -104,7 +104,7 @@ func TestAssociatorLogs(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_mediaconvert_test.go b/pkg/job/maxdimassociator/associator_mediaconvert_test.go index d55885e79..3f1bde679 100644 --- a/pkg/job/maxdimassociator/associator_mediaconvert_test.go +++ b/pkg/job/maxdimassociator/associator_mediaconvert_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -104,7 +104,7 @@ func TestAssociatorMediaConvert(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_memorydb_test.go b/pkg/job/maxdimassociator/associator_memorydb_test.go index 582c1ffe5..8cce70bde 100644 --- a/pkg/job/maxdimassociator/associator_memorydb_test.go +++ b/pkg/job/maxdimassociator/associator_memorydb_test.go @@ -15,10 +15,10 @@ 
package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -120,7 +120,7 @@ func TestAssociatorMemoryDB(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_mq_test.go b/pkg/job/maxdimassociator/associator_mq_test.go index 8daed5379..5bc055b99 100644 --- a/pkg/job/maxdimassociator/associator_mq_test.go +++ b/pkg/job/maxdimassociator/associator_mq_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -87,7 +87,7 @@ func TestAssociatorMQ(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_qldb_test.go 
b/pkg/job/maxdimassociator/associator_qldb_test.go index e444764b0..d8c4e3ab4 100644 --- a/pkg/job/maxdimassociator/associator_qldb_test.go +++ b/pkg/job/maxdimassociator/associator_qldb_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -78,7 +78,7 @@ func TestAssociatorQLDB(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_sagemaker_endpoint_test.go b/pkg/job/maxdimassociator/associator_sagemaker_endpoint_test.go index 7f5348b3d..4778e1b1c 100644 --- a/pkg/job/maxdimassociator/associator_sagemaker_endpoint_test.go +++ b/pkg/job/maxdimassociator/associator_sagemaker_endpoint_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -90,7 +90,7 @@ func TestAssociatorSagemakerEndpoint(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := 
NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_sagemaker_inf_rec_test.go b/pkg/job/maxdimassociator/associator_sagemaker_inf_rec_test.go index 9629fa330..16733317b 100644 --- a/pkg/job/maxdimassociator/associator_sagemaker_inf_rec_test.go +++ b/pkg/job/maxdimassociator/associator_sagemaker_inf_rec_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -66,7 +66,7 @@ func TestAssociatorSagemakerInfRecJob(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_sagemaker_pipeline_test.go b/pkg/job/maxdimassociator/associator_sagemaker_pipeline_test.go index de4e02f31..4a393b5d0 100644 --- a/pkg/job/maxdimassociator/associator_sagemaker_pipeline_test.go +++ b/pkg/job/maxdimassociator/associator_sagemaker_pipeline_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - 
"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -106,7 +106,7 @@ func TestAssociatorSagemakerPipeline(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_sagemaker_processing_test.go b/pkg/job/maxdimassociator/associator_sagemaker_processing_test.go index 29fbbed79..e39acdce0 100644 --- a/pkg/job/maxdimassociator/associator_sagemaker_processing_test.go +++ b/pkg/job/maxdimassociator/associator_sagemaker_processing_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -66,7 +66,7 @@ func TestAssociatorSagemakerProcessingJob(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_sagemaker_test.go b/pkg/job/maxdimassociator/associator_sagemaker_test.go index 02b09a308..19152e16f 100644 --- 
a/pkg/job/maxdimassociator/associator_sagemaker_test.go +++ b/pkg/job/maxdimassociator/associator_sagemaker_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -131,7 +131,7 @@ func TestAssociatorSagemaker(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_sagemaker_training_test.go b/pkg/job/maxdimassociator/associator_sagemaker_training_test.go index 029a60d35..5a123148b 100644 --- a/pkg/job/maxdimassociator/associator_sagemaker_training_test.go +++ b/pkg/job/maxdimassociator/associator_sagemaker_training_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -66,7 +66,7 @@ func TestAssociatorSagemakerTrainingJob(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, 
tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/maxdimassociator/associator_sagemaker_transform_test.go b/pkg/job/maxdimassociator/associator_sagemaker_transform_test.go index 7b1456d75..106544b8c 100644 --- a/pkg/job/maxdimassociator/associator_sagemaker_transform_test.go +++ b/pkg/job/maxdimassociator/associator_sagemaker_transform_test.go @@ -15,10 +15,10 @@ package maxdimassociator import ( "testing" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -66,7 +66,7 @@ func TestAssociatorSagemakerTransformJob(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - associator := NewAssociator(logging.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) + associator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources) res, skip := associator.AssociateMetricToResource(tc.args.metric) require.Equal(t, tc.expectedSkip, skip) require.Equal(t, tc.expectedResource, res) diff --git a/pkg/job/scrape.go b/pkg/job/scrape.go index f6e262d9a..99888f0bf 100644 --- a/pkg/job/scrape.go +++ b/pkg/job/scrape.go @@ -14,19 +14,19 @@ package job import ( "context" + "log/slog" "sync" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/getmetricdata" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" 
"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) func ScrapeAwsData( ctx context.Context, - logger logging.Logger, + logger *slog.Logger, jobsCfg model.JobsConfig, factory clients.Factory, metricsPerQuery int, @@ -47,7 +47,7 @@ func ScrapeAwsData( jobLogger := logger.With("job_type", discoveryJob.Type, "region", region, "arn", role.RoleArn) accountID, err := factory.GetAccountClient(region, role).GetAccount(ctx) if err != nil { - jobLogger.Error(err, "Couldn't get account Id") + jobLogger.Error("Couldn't get account Id", "err", err) return } jobLogger = jobLogger.With("account", accountID) @@ -101,7 +101,7 @@ func ScrapeAwsData( jobLogger := logger.With("static_job_name", staticJob.Name, "region", region, "arn", role.RoleArn) accountID, err := factory.GetAccountClient(region, role).GetAccount(ctx) if err != nil { - jobLogger.Error(err, "Couldn't get account Id") + jobLogger.Error("Couldn't get account Id", "err", err) return } jobLogger = jobLogger.With("account", accountID) @@ -138,7 +138,7 @@ func ScrapeAwsData( jobLogger := logger.With("custom_metric_namespace", customNamespaceJob.Namespace, "region", region, "arn", role.RoleArn) accountID, err := factory.GetAccountClient(region, role).GetAccount(ctx) if err != nil { - jobLogger.Error(err, "Couldn't get account Id") + jobLogger.Error("Couldn't get account Id", "err", err) return } jobLogger = jobLogger.With("account", accountID) diff --git a/pkg/job/scraper.go b/pkg/job/scraper.go index 4719abb1f..55bd384ed 100644 --- a/pkg/job/scraper.go +++ b/pkg/job/scraper.go @@ -15,25 +15,25 @@ package job import ( "context" "fmt" + "log/slog" "sync" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/cloudwatchrunner" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) type Scraper 
struct { jobsCfg model.JobsConfig - logger logging.Logger + logger *slog.Logger runnerFactory runnerFactory } type runnerFactory interface { GetAccountClient(region string, role model.Role) account.Client - NewResourceMetadataRunner(logger logging.Logger, region string, role model.Role) ResourceMetadataRunner - NewCloudWatchRunner(logger logging.Logger, region string, role model.Role, job cloudwatchrunner.Job) CloudwatchRunner + NewResourceMetadataRunner(logger *slog.Logger, region string, role model.Role) ResourceMetadataRunner + NewCloudWatchRunner(logger *slog.Logger, region string, role model.Role, job cloudwatchrunner.Job) CloudwatchRunner } type ResourceMetadataRunner interface { @@ -44,7 +44,7 @@ type CloudwatchRunner interface { Run(ctx context.Context) ([]*model.CloudwatchData, error) } -func NewScraper(logger logging.Logger, +func NewScraper(logger *slog.Logger, jobsCfg model.JobsConfig, runnerFactory runnerFactory, ) *Scraper { @@ -221,7 +221,7 @@ func jobConfigVisitor(jobsCfg model.JobsConfig, action func(job any, role model. 
} // Take an action depending on the job type, only supports discovery and custom job types -func jobAction(logger logging.Logger, job any, discovery func(job model.DiscoveryJob), custom func(job model.CustomNamespaceJob)) { +func jobAction(logger *slog.Logger, job any, discovery func(job model.DiscoveryJob), custom func(job model.CustomNamespaceJob)) { // Type switches are free https://stackoverflow.com/a/28027945 switch typedJob := job.(type) { case model.DiscoveryJob: @@ -229,7 +229,7 @@ func jobAction(logger logging.Logger, job any, discovery func(job model.Discover case model.CustomNamespaceJob: custom(typedJob) default: - logger.Error(fmt.Errorf("config type of %T is not supported", typedJob), "Unexpected job type") + logger.Error("Unexpected job type", "err", fmt.Errorf("config type of %T is not supported", typedJob)) return } } diff --git a/pkg/job/scraper_test.go b/pkg/job/scraper_test.go index f8c8be75e..0ac4c43c6 100644 --- a/pkg/job/scraper_test.go +++ b/pkg/job/scraper_test.go @@ -15,18 +15,19 @@ package job_test import ( "context" "errors" + "log/slog" "reflect" "testing" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/prometheus/common/promslog" "github.com/r3labs/diff/v3" "github.com/stretchr/testify/assert" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/cloudwatchrunner" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -53,11 +54,11 @@ func (t *testRunnerFactory) GetAccountClient(string, model.Role) account.Client return t } -func (t *testRunnerFactory) NewResourceMetadataRunner(logging.Logger, string, model.Role) job.ResourceMetadataRunner { +func (t *testRunnerFactory) NewResourceMetadataRunner(*slog.Logger, string, model.Role) 
job.ResourceMetadataRunner { return &testMetadataRunner{RunFunc: t.MetadataRunFunc} } -func (t *testRunnerFactory) NewCloudWatchRunner(_ logging.Logger, _ string, _ model.Role, job cloudwatchrunner.Job) job.CloudwatchRunner { +func (t *testRunnerFactory) NewCloudWatchRunner(_ *slog.Logger, _ string, _ model.Role, job cloudwatchrunner.Job) job.CloudwatchRunner { return &testCloudwatchRunner{Job: job, RunFunc: t.CloudwatchRunFunc} } @@ -540,7 +541,9 @@ func TestScrapeRunner_Run(t *testing.T) { MetadataRunFunc: tc.metadataRunFunc, CloudwatchRunFunc: tc.cloudwatchRunFunc, } - sr := job.NewScraper(logging.NewLogger("", true), tc.jobsCfg, &rf) + lvl := &promslog.AllowedLevel{} + _ = lvl.Set("debug") + sr := job.NewScraper(promslog.New(&promslog.Config{Level: lvl}), tc.jobsCfg, &rf) resources, metrics, errs := sr.Scrape(context.Background()) changelog, err := diff.Diff(tc.expectedResources, resources) diff --git a/pkg/job/static.go b/pkg/job/static.go index eaf40eeff..5b3cd4fe5 100644 --- a/pkg/job/static.go +++ b/pkg/job/static.go @@ -14,16 +14,16 @@ package job import ( "context" + "log/slog" "sync" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) func runStaticJob( ctx context.Context, - logger logging.Logger, + logger *slog.Logger, resource model.StaticJob, clientCloudwatch cloudwatch.Client, ) []*model.CloudwatchData { diff --git a/pkg/logging/logger.go b/pkg/logging/logger.go deleted file mode 100644 index 178c4db04..000000000 --- a/pkg/logging/logger.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package logging - -import ( - "os" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" -) - -type Logger interface { - Info(message string, keyvals ...interface{}) - Debug(message string, keyvals ...interface{}) - Error(err error, message string, keyvals ...interface{}) - Warn(message string, keyvals ...interface{}) - With(keyvals ...interface{}) Logger - IsDebugEnabled() bool -} - -type gokitLogger struct { - logger log.Logger - debugEnabled bool -} - -func NewLogger(format string, debugEnabled bool, keyvals ...interface{}) Logger { - var logger log.Logger - if format == "json" { - logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr)) - } else { - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - } - - if debugEnabled { - logger = level.NewFilter(logger, level.AllowDebug()) - } else { - logger = level.NewFilter(logger, level.AllowInfo()) - } - - logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.Caller(4)) - logger = log.With(logger, keyvals...) - - return gokitLogger{ - logger: logger, - debugEnabled: debugEnabled, - } -} - -func NewNopLogger() Logger { - return gokitLogger{logger: log.NewNopLogger()} -} - -func (g gokitLogger) Debug(message string, keyvals ...interface{}) { - if g.debugEnabled { - kv := []interface{}{"msg", message} - kv = append(kv, keyvals...) - level.Debug(g.logger).Log(kv...) - } -} - -func (g gokitLogger) Info(message string, keyvals ...interface{}) { - kv := []interface{}{"msg", message} - kv = append(kv, keyvals...) - level.Info(g.logger).Log(kv...) 
-} - -func (g gokitLogger) Error(err error, message string, keyvals ...interface{}) { - kv := []interface{}{"msg", message, "err", err} - kv = append(kv, keyvals...) - level.Error(g.logger).Log(kv...) -} - -func (g gokitLogger) Warn(message string, keyvals ...interface{}) { - kv := []interface{}{"msg", message} - kv = append(kv, keyvals...) - level.Warn(g.logger).Log(kv...) -} - -func (g gokitLogger) With(keyvals ...interface{}) Logger { - return gokitLogger{ - logger: log.With(g.logger, keyvals...), - debugEnabled: g.debugEnabled, - } -} - -func (g gokitLogger) IsDebugEnabled() bool { - return g.debugEnabled -} diff --git a/pkg/promutil/migrate.go b/pkg/promutil/migrate.go index 8161f00bc..6457d1364 100644 --- a/pkg/promutil/migrate.go +++ b/pkg/promutil/migrate.go @@ -14,6 +14,7 @@ package promutil import ( "fmt" + "log/slog" "maps" "math" "sort" @@ -23,7 +24,6 @@ import ( "github.com/grafana/regexp" prom_model "github.com/prometheus/common/model" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -56,7 +56,7 @@ func BuildMetricName(namespace, metricName, statistic string) string { return sb.String() } -func BuildNamespaceInfoMetrics(tagData []model.TaggedResourceResult, metrics []*PrometheusMetric, observedMetricLabels map[string]model.LabelSet, labelsSnakeCase bool, logger logging.Logger) ([]*PrometheusMetric, map[string]model.LabelSet) { +func BuildNamespaceInfoMetrics(tagData []model.TaggedResourceResult, metrics []*PrometheusMetric, observedMetricLabels map[string]model.LabelSet, labelsSnakeCase bool, logger *slog.Logger) ([]*PrometheusMetric, map[string]model.LabelSet) { for _, tagResult := range tagData { contextLabels := contextToLabels(tagResult.Context, labelsSnakeCase, logger) for _, d := range tagResult.Data { @@ -88,7 +88,7 @@ func BuildNamespaceInfoMetrics(tagData []model.TaggedResourceResult, metrics []* return metrics, observedMetricLabels 
} -func BuildMetrics(results []model.CloudwatchMetricResult, labelsSnakeCase bool, logger logging.Logger) ([]*PrometheusMetric, map[string]model.LabelSet, error) { +func BuildMetrics(results []model.CloudwatchMetricResult, labelsSnakeCase bool, logger *slog.Logger) ([]*PrometheusMetric, map[string]model.LabelSet, error) { output := make([]*PrometheusMetric, 0) observedMetricLabels := make(map[string]model.LabelSet) @@ -221,7 +221,7 @@ func sortByTimestamp(datapoints []*model.Datapoint) []*model.Datapoint { return datapoints } -func createPrometheusLabels(cwd *model.CloudwatchData, labelsSnakeCase bool, contextLabels map[string]string, logger logging.Logger) map[string]string { +func createPrometheusLabels(cwd *model.CloudwatchData, labelsSnakeCase bool, contextLabels map[string]string, logger *slog.Logger) map[string]string { labels := make(map[string]string, len(cwd.Dimensions)+len(cwd.Tags)+len(contextLabels)) labels["name"] = cwd.ResourceName @@ -249,7 +249,7 @@ func createPrometheusLabels(cwd *model.CloudwatchData, labelsSnakeCase bool, con return labels } -func contextToLabels(context *model.ScrapeContext, labelsSnakeCase bool, logger logging.Logger) map[string]string { +func contextToLabels(context *model.ScrapeContext, labelsSnakeCase bool, logger *slog.Logger) map[string]string { if context == nil { return map[string]string{} } diff --git a/pkg/promutil/migrate_test.go b/pkg/promutil/migrate_test.go index e9c9ccdf0..f617007d3 100644 --- a/pkg/promutil/migrate_test.go +++ b/pkg/promutil/migrate_test.go @@ -18,9 +18,9 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/logging" "github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model" ) @@ -278,7 +278,7 @@ func TestBuildNamespaceInfoMetrics(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - metrics, labels := 
BuildNamespaceInfoMetrics(tc.resources, tc.metrics, tc.observedMetricLabels, tc.labelsSnakeCase, logging.NewNopLogger()) + metrics, labels := BuildNamespaceInfoMetrics(tc.resources, tc.metrics, tc.observedMetricLabels, tc.labelsSnakeCase, promslog.NewNopLogger()) require.Equal(t, tc.expectedMetrics, metrics) require.Equal(t, tc.expectedLabels, labels) }) @@ -959,7 +959,7 @@ func TestBuildMetrics(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - res, labels, err := BuildMetrics(tc.data, tc.labelsSnakeCase, logging.NewNopLogger()) + res, labels, err := BuildMetrics(tc.data, tc.labelsSnakeCase, promslog.NewNopLogger()) if tc.expectedErr != nil { require.Equal(t, tc.expectedErr, err) } else { @@ -1086,7 +1086,7 @@ func Benchmark_BuildMetrics(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - _, labels, err = BuildMetrics(data, false, logging.NewNopLogger()) + _, labels, err = BuildMetrics(data, false, promslog.NewNopLogger()) } expectedLabels := map[string]model.LabelSet{