From 72c9c046b392bad9dc087e8f345eb56c945d4e2a Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Wed, 20 Nov 2024 19:32:57 +0800 Subject: [PATCH 001/170] feat(outputs): Add inlong output plugin --- plugins/outputs/all/inlong.go | 5 ++ plugins/outputs/inlong/README.md | 46 +++++++++++++++ plugins/outputs/inlong/inlong.go | 85 +++++++++++++++++++++++++++ plugins/outputs/inlong/inlong_test.go | 76 ++++++++++++++++++++++++ plugins/outputs/inlong/sample.conf | 32 ++++++++++ 5 files changed, 244 insertions(+) create mode 100644 plugins/outputs/all/inlong.go create mode 100644 plugins/outputs/inlong/README.md create mode 100644 plugins/outputs/inlong/inlong.go create mode 100644 plugins/outputs/inlong/inlong_test.go create mode 100644 plugins/outputs/inlong/sample.conf diff --git a/plugins/outputs/all/inlong.go b/plugins/outputs/all/inlong.go new file mode 100644 index 0000000000000..3e90277ea1e1c --- /dev/null +++ b/plugins/outputs/all/inlong.go @@ -0,0 +1,5 @@ +//go:build !custom || outputs || outputs.file + +package all + +import _ "github.com/influxdata/telegraf/plugins/outputs/inlong" // register plugin diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md new file mode 100644 index 0000000000000..a7a7d347eb4d3 --- /dev/null +++ b/plugins/outputs/inlong/README.md @@ -0,0 +1,46 @@ +# Inlong Output Plugin + +This plugin writes telegraf metrics to Inlong + +## Global configuration options + +In addition to the plugin-specific configuration settings, plugins support +additional global and plugin configuration settings. These settings are used to +modify metrics, tags, and field or create aliases and configure ordering, etc. +See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
+ +[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins + +## Configuration + +```toml @sample.conf +# Send telegraf metrics to Inlong +[[outputs.inlong]] + ## From the inlong system, data Streams Group, it contains multiple data streams, and one Group represents + ## one data business unit. + inlong_group_id = "test_group" + + ## From the inlong system, data Stream, a stream has a specific data source, data format and data sink. + inlong_stream_id = "test_stream" + + ## Retry interval, the interval to resend the message in the resend queue. + retry_interval_ms = 10 + + ## The URL used to obtain the Inlong DataProxy IP list to which the data will be sent + inlong_manager_url = "http://127.0.0.1:8083/inlong/manager/openapi/dataproxy/getIpList" + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Suggest using CSV format here, as inlong is also processed in CSV format + data_format = "csv" + + ## The delimiter used when serializing data in CSV format needs to be consistent with the delimiter + ## configured for inlong, so that the data can be parsed properly after it reaches inlong + csv_separator = "|" + + ## The final output field order here needs to be consistent with the field order defined by the data + ## stream in inlong + csv_columns = ["field.key","file.value"] +``` diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go new file mode 100644 index 0000000000000..c6224bfbc5775 --- /dev/null +++ b/plugins/outputs/inlong/inlong.go @@ -0,0 +1,85 @@ +package inlong + +import ( + "context" + _ "embed" + "fmt" + "github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang/dataproxy" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" + 
"github.com/influxdata/telegraf/plugins/serializers" +) + +//go:embed sample.conf +var sampleConfig string + +type Inlong struct { + GroupId string `toml:"group_id"` + StreamId string `toml:"stream_id"` + ManagerUrl string `toml:"manager_url"` + RetryIntervalMs int64 `toml:"retry_interval_ms"` + Log telegraf.Logger `toml:"-"` + + producerFunc func(groupId string, managerUrl string) (dataproxy.Client, error) + producer dataproxy.Client + serializer serializers.Serializer +} + +func (i *Inlong) SampleConfig() string { + return sampleConfig +} + +func (i *Inlong) SetSerializer(serializer serializers.Serializer) { + i.serializer = serializer +} + +func (i *Inlong) Connect() error { + producer, err := i.producerFunc(i.GroupId, i.ManagerUrl) + if err != nil { + return &internal.StartupError{Err: err, Retry: true} + } + i.producer = producer + return nil +} + +func (i *Inlong) Close() error { + i.producer.Close() + return nil +} + +func (i *Inlong) Write(metrics []telegraf.Metric) error { + for _, metric := range metrics { + b, err := i.serializer.Serialize(metric) + if err != nil { + return fmt.Errorf("could not serialize metric: %w", err) + } + err = i.producer.Send(context.Background(), dataproxy.Message{ + GroupID: i.GroupId, + StreamID: i.StreamId, + Payload: b, + }) + if err != nil { + return err + } + } + return nil +} + +func init() { + outputs.Add("inlong", func() telegraf.Output { + return &Inlong{ + producerFunc: func(id string, url string) (dataproxy.Client, error) { + producer, err := dataproxy.NewClient( + dataproxy.WithGroupID(id), + dataproxy.WithURL(url), + ) + if err != nil { + fmt.Println(err) + return nil, err + } + return producer, nil + }, + } + }) +} diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go new file mode 100644 index 0000000000000..110b90c6ec82b --- /dev/null +++ b/plugins/outputs/inlong/inlong_test.go @@ -0,0 +1,76 @@ +package inlong + +import ( + "context" + 
"github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang/dataproxy" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers/csv" + "github.com/stretchr/testify/require" + "testing" + "time" +) + +type MockProducer struct { + groupId string + managerUrl string +} + +func (p *MockProducer) Send(ctx context.Context, msg dataproxy.Message) error { + + return nil +} + +func (p *MockProducer) SendAsync(ctx context.Context, msg dataproxy.Message, callback dataproxy.Callback) { + return +} + +func (p *MockProducer) Close() { + return +} + +func (p *MockProducer) SendMessage(ctx context.Context, msg dataproxy.Message) error { + return nil +} + +func NewMockProducer(groupId string, managerUrl string) (dataproxy.Client, error) { + p := &MockProducer{} + p.groupId = groupId + p.managerUrl = managerUrl + return p, nil +} + +func TestInlong_Connect(t *testing.T) { + t.Run("", func(t *testing.T) { + i := &Inlong{ + producerFunc: NewMockProducer, + } + require.NoError(t, i.Connect()) + }) +} + +func TestInlong_Write(t *testing.T) { + s := &csv.Serializer{Header: true} + s.Init() + t.Run("", func(t *testing.T) { + producer := &MockProducer{} + i := &Inlong{ + producer: producer, + serializer: s, + } + m := metric.New( + "cpu", + map[string]string{ + "topic": "xyzzy", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ) + var metrics []telegraf.Metric + metrics = append(metrics, m) + require.NoError(t, i.Write(metrics)) + }) + +} diff --git a/plugins/outputs/inlong/sample.conf b/plugins/outputs/inlong/sample.conf new file mode 100644 index 0000000000000..b8828a73fcebe --- /dev/null +++ b/plugins/outputs/inlong/sample.conf @@ -0,0 +1,32 @@ +# Send telegraf metrics to Inlong +[[outputs.inlong]] + ## From the inlong system, data Streams Group, it contains multiple data streams, and one Group represents + ## one data business unit. 
+ inlong_group_id = "test_group" + + ## From the inlong system, data Stream, a stream has a specific data source, data format and data sink. + inlong_stream_id = "test_stream" + + ## The URL used to obtain the Inlong DataProxy IP list to which the data will be sent + inlong_manager_url = "http://127.0.0.1:8083/inlong/manager/openapi/dataproxy/getIpList" + + ## Retry interval, the interval to resend the message in the resend queue. + retry_interval_ms = 10 + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Suggest using CSV format here, as inlong is also processed in CSV format + data_format = "csv" + + ## The delimiter used when serializing data in CSV format needs to be consistent with the delimiter + ## configured for inlong, so that the data can be parsed properly after it reaches inlong + csv_separator = "|" + + ## The final output field order here needs to be consistent with the field order defined by the data + ## stream in inlong + csv_columns = ["field.key","file.value"] + + + From fb51e47543ce92b75c9e3dec12876c744d67f1ea Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Wed, 20 Nov 2024 19:52:16 +0800 Subject: [PATCH 002/170] feat(outputs): Save go mod --- go.mod | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/go.mod b/go.mod index 7ecbfd5e18d30..3ff4ad5fe2563 100644 --- a/go.mod +++ b/go.mod @@ -39,6 +39,7 @@ require ( github.com/antchfx/xmlquery v1.4.1 github.com/antchfx/xpath v1.3.1 github.com/apache/arrow/go/v18 v18.0.0-20240716144821-cf5d7c7ec3cf + github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang v0.0.0-20241120061539-3700baa19f68 github.com/apache/iotdb-client-go v1.3.2 github.com/apache/thrift v0.21.0 github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 @@ -298,6 +299,7 @@ require ( github.com/bitly/go-hostpool v0.1.0 // 
indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/bufbuild/protocompile v0.10.0 // indirect + github.com/bwmarrin/snowflake v0.3.0 // indirect github.com/caio/go-tdigest/v4 v4.0.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -335,6 +337,7 @@ require ( github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-resty/resty/v2 v2.13.1 // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.1-0.20211022031912-bfb69110f8dd // indirect @@ -434,6 +437,8 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect + github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect + github.com/panjf2000/gnet/v2 v2.5.7 // indirect github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pion/logging v0.2.2 // indirect @@ -470,6 +475,7 @@ require ( github.com/twmb/murmur3 v1.1.7 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect @@ -481,6 +487,7 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/assert v1.3.1 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect + github.com/zentures/cityhash v0.0.0-20131128155616-cdd6a94144ab // indirect github.com/zitadel/logging v0.6.1 // indirect github.com/zitadel/oidc/v3 v3.30.0 // indirect 
github.com/zitadel/schema v1.3.0 // indirect @@ -508,6 +515,7 @@ require ( gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/sourcemap.v1 v1.0.5 // indirect gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect From 81a7d4c9808e687a52a97624fd8110fff94c9fe0 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Wed, 20 Nov 2024 19:54:06 +0800 Subject: [PATCH 003/170] feat(outputs): Save go sum --- go.sum | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/go.sum b/go.sum index 5269eb144d935..f8e65aa44ba19 100644 --- a/go.sum +++ b/go.sum @@ -822,6 +822,8 @@ github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcy github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= github.com/apache/arrow/go/v18 v18.0.0-20240716144821-cf5d7c7ec3cf h1:9b4bG4uqvid0RH3MHWq2soXTfhPFbqbuNCqLRrl4ZGg= github.com/apache/arrow/go/v18 v18.0.0-20240716144821-cf5d7c7ec3cf/go.mod h1:84kVJOfdiXAj9Zo8lvZ2uuJVzPn2vKlPdrSHU1zD2mE= +github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang v0.0.0-20241120061539-3700baa19f68 h1:IaHtzTmjOUvGo2JxGmDVFVwCFnphlwaFmWv3kfrCC9M= +github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang v0.0.0-20241120061539-3700baa19f68/go.mod h1:d+xqug/5+N0HnQVDf+0gTyB/SFIGjD3VpCbTalvBXzM= github.com/apache/iotdb-client-go v1.3.2 h1:IPPVlOganGJ6Q0NTWtktLgsvsuG9YIRP1U6nhO9ee6k= github.com/apache/iotdb-client-go v1.3.2/go.mod h1:3D6QYkqRmASS/4HsjU+U/3fscyc5M9xKRfywZsKuoZY= github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= @@ -981,6 +983,8 @@ github.com/buengese/sgzip v0.1.1 h1:ry+T8l1mlmiWEsDrH/YHZnCVWD2S3im1KLsyO+8ZmTU= github.com/buengese/sgzip v0.1.1/go.mod h1:i5ZiXGF3fhV7gL1xaRRL1nDnmpNj0X061FQzOS8VMas= 
github.com/bufbuild/protocompile v0.10.0 h1:+jW/wnLMLxaCEG8AX9lD0bQ5v9h1RUiMKOBOT5ll9dM= github.com/bufbuild/protocompile v0.10.0/go.mod h1:G9qQIQo0xZ6Uyj6CMNz0saGmx2so+KONo8/KrELABiY= +github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0= +github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/caio/go-tdigest/v4 v4.0.1 h1:sx4ZxjmIEcLROUPs2j1BGe2WhOtHD6VSe6NNbBdKYh4= @@ -2032,10 +2036,14 @@ github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7s github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/oracle/oci-go-sdk/v65 v65.69.2 h1:lROMJ8/VakGOGObAWUxTVY2AX1wQCUIzVqfL4Fb2Ay8= github.com/oracle/oci-go-sdk/v65 v65.69.2/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0= +github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw= +github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod h1:X07ZCGwUbLaax7L0S3Tw4hpejzu63ZrrQiUe6W0hcy0= github.com/p4lang/p4runtime v1.4.0 h1:LbCCClz/5uJzLU+puL2aA/0Bz6xiZKxKVyVlTIhAWOQ= github.com/p4lang/p4runtime v1.4.0/go.mod h1:OWAP4Wh9uKGnQjleslObpFE0REP78b5gR1pHyYmvNPQ= -github.com/panjf2000/ants/v2 v2.9.1 h1:Q5vh5xohbsZXGcD6hhszzGqB7jSSc2/CRr3QKIga8Kw= -github.com/panjf2000/ants/v2 v2.9.1/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= +github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8= +github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= +github.com/panjf2000/gnet/v2 v2.5.7 h1:EGGIfLYEVAp2l5WSYT2XddSjpQ642PjwphbWhcJ0WBY= +github.com/panjf2000/gnet/v2 v2.5.7/go.mod h1:ppopMJ8VrDbJu8kDsqFQTgNmpMS8Le5CmPxISf+Sauk= github.com/pascaldekloe/goe 
v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.1 h1:Ah6WQ56rZONR3RW3qWa2NCZ6JAVvSpUcoLBaOmYFt9Q= @@ -2408,6 +2416,8 @@ github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtC github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/zentures/cityhash v0.0.0-20131128155616-cdd6a94144ab h1:BD4YbH4Y0ysgbrP9jGuDB0BxkqyTRk6Y70o3D5Z5ayc= +github.com/zentures/cityhash v0.0.0-20131128155616-cdd6a94144ab/go.mod h1:SvJE1nX57VqPOyqkQGEGcJPWZqeB3FCZ8s7a0uSlG+A= github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y= github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow= github.com/zitadel/oidc/v3 v3.30.0 h1:1IuZlK+X+JLExEA2PYgRlVvWHBhz/cMwT7VL/YrQabw= @@ -2522,6 +2532,7 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2676,6 +2687,7 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net 
v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2863,6 +2875,7 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2882,6 +2895,7 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2903,6 +2917,7 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod 
h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2911,6 +2926,7 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -3314,6 +3330,8 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/olivere/elastic.v5 v5.0.86 h1:xFy6qRCGAmo5Wjx96srho9BitLhZl2fcnpuidPwduXM= gopkg.in/olivere/elastic.v5 v5.0.86/go.mod h1:M3WNlsF+WhYn7api4D87NIflwTV/c0iVs8cqfWhK+68= gopkg.in/resty.v1 v1.12.0/go.mod 
h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= From 0fb850e45bdc6dee3a4e1f39a2e64c314b5f6913 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 21 Nov 2024 10:55:18 +0800 Subject: [PATCH 004/170] feat(outputs): Fix error from make check-deps --- docs/LICENSE_OF_DEPENDENCIES.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ae4696ad3cee3..d59deda6aff1d 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -8,6 +8,7 @@ following works: - collectd.org [ISC License](https://github.com/collectd/go-collectd/blob/master/LICENSE) - dario.cat/mergo [BSD 3-Clause "New" or "Revised" License](https://github.com/imdario/mergo/blob/master/LICENSE) - filippo.io/edwards25519 [BSD 3-Clause "New" or "Revised" License](https://github.com/FiloSottile/edwards25519/blob/main/LICENSE) +- github.com/99designs/go-keychain [MIT License](https://github.com/99designs/go-keychain/blob/master/LICENSE) - github.com/99designs/keyring [MIT License](https://github.com/99designs/keyring/blob/master/LICENSE) - github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE) - github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE) @@ -51,6 +52,7 @@ following works: - github.com/antchfx/xpath [MIT License](https://github.com/antchfx/xpath/blob/master/LICENSE) - github.com/antlr4-go/antlr [BSD 3-Clause "New" or "Revised" License](https://github.com/antlr/antlr4/blob/master/LICENSE.txt) - github.com/apache/arrow/go [Apache License 2.0](https://github.com/apache/arrow/blob/master/LICENSE.txt) +- github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang [Apache License 2.0](https://github.com/apache/inlong/blob/master/LICENSE) - github.com/apache/iotdb-client-go [Apache License 2.0](https://github.com/apache/iotdb-client-go/blob/main/LICENSE) - 
github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) @@ -93,6 +95,7 @@ following works: - github.com/bmatcuk/doublestar [MIT License](https://github.com/bmatcuk/doublestar/blob/master/LICENSE) - github.com/boschrexroth/ctrlx-datalayer-golang [MIT License](https://github.com/boschrexroth/ctrlx-datalayer-golang/blob/main/LICENSE) - github.com/bufbuild/protocompile [Apache License 2.0](https://github.com/bufbuild/protocompile/blob/main/LICENSE) +- github.com/bwmarrin/snowflake [BSD 2-Clause "Simplified" License] (https://github.com/bwmarrin/snowflake/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) - github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) @@ -139,6 +142,7 @@ following works: - github.com/gabriel-vasile/mimetype [MIT License](https://github.com/gabriel-vasile/mimetype/blob/master/LICENSE) - github.com/go-asn1-ber/asn1-ber [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) - github.com/go-chi/chi [MIT License](https://github.com/go-chi/chi/blob/master/LICENSE) +- github.com/go-darwin/apfs [BSD 3-Clause "New" or "Revised" License] (https://github.com/go-darwin/apfs/blob/main/LICENSE) - github.com/go-git/go-billy [Apache License 2.0](https://github.com/go-git/go-billy/blob/master/LICENSE) - github.com/go-ldap/ldap [MIT License](https://github.com/go-ldap/ldap/blob/v3.4.1/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) @@ -149,6 +153,7 @@ following works: - github.com/go-openapi/jsonreference [Apache License 2.0](https://github.com/go-openapi/jsonreference/blob/master/LICENSE) - github.com/go-openapi/swag 
[Apache License 2.0](https://github.com/go-openapi/swag/blob/master/LICENSE) - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) +- github.com/go-resty/resty [MIT License] (https://github.com/go-resty/resty/blob/v2/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) - github.com/go-stack/stack [MIT License](https://github.com/go-stack/stack/blob/master/LICENSE.md) - github.com/go-stomp/stomp [Apache License 2.0](https://github.com/go-stomp/stomp/blob/master/LICENSE.txt) @@ -301,7 +306,9 @@ following works: - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) - github.com/opensearch-project/opensearch-go [Apache License 2.0](https://github.com/opensearch-project/opensearch-go/blob/main/LICENSE.txt) - github.com/opentracing/opentracing-go [Apache License 2.0](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) +- github.com/oxtoacart/bpool [Apache License 2.0] (https://github.com/oxtoacart/bpool/blob/master/LICENSE) - github.com/p4lang/p4runtime [Apache License 2.0](https://github.com/p4lang/p4runtime/blob/main/LICENSE) +- github.com/panjf2000/gnet [Apache License 2.0] (https://github.com/panjf2000/gnet/blob/dev/LICENSE) - github.com/pborman/ansi [BSD 3-Clause "New" or "Revised" License](https://github.com/pborman/ansi/blob/master/LICENSE) - github.com/pcolladosoto/goslurm [MIT License](https://github.com/pcolladosoto/goslurm/blob/main/LICENSE) - github.com/peterbourgon/unixtransport [Apache License 2.0](https://github.com/peterbourgon/unixtransport/blob/main/LICENSE) @@ -368,6 +375,7 @@ following works: - github.com/uber/jaeger-client-go [Apache License 2.0](https://github.com/jaegertracing/jaeger-client-go/blob/master/LICENSE) - github.com/uber/jaeger-lib [Apache License 
2.0](https://github.com/jaegertracing/jaeger-lib/blob/main/LICENSE) - github.com/urfave/cli [MIT License](https://github.com/urfave/cli/blob/main/LICENSE) +- github.com/valyala/bytebufferpool [MIT License] (https://github.com/valyala/bytebufferpool/blob/master/LICENSE) - github.com/vapourismo/knx-go [MIT License](https://github.com/vapourismo/knx-go/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) @@ -386,6 +394,7 @@ following works: - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) - github.com/yusufpapurcu/wmi [MIT License](https://github.com/yusufpapurcu/wmi/blob/master/LICENSE) - github.com/zeebo/xxh3 [BSD 2-Clause "Simplified" License](https://github.com/zeebo/xxh3/blob/master/LICENSE) +- github.com/zentures/cityhash [MIT License] (https://github.com/zentures/cityhash/blob/master/LICENSE) - go.mongodb.org/mongo-driver [Apache License 2.0](https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - go.opentelemetry.io/collector/consumer [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) @@ -426,6 +435,7 @@ following works: - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) - gopkg.in/ini.v1 [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) +- gopkg.in/natefinch/lumberjack.v2 [MIT License] (https://github.com/natefinch/lumberjack/blob/v2.0/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - 
gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) From 3bd15ec92727f5dd5e8eece8eb846acb54593ab6 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 21 Nov 2024 11:04:57 +0800 Subject: [PATCH 005/170] feat(outputs): Fix error from make check-deps --- docs/LICENSE_OF_DEPENDENCIES.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index d59deda6aff1d..9c76f778ef648 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -8,7 +8,6 @@ following works: - collectd.org [ISC License](https://github.com/collectd/go-collectd/blob/master/LICENSE) - dario.cat/mergo [BSD 3-Clause "New" or "Revised" License](https://github.com/imdario/mergo/blob/master/LICENSE) - filippo.io/edwards25519 [BSD 3-Clause "New" or "Revised" License](https://github.com/FiloSottile/edwards25519/blob/main/LICENSE) -- github.com/99designs/go-keychain [MIT License](https://github.com/99designs/go-keychain/blob/master/LICENSE) - github.com/99designs/keyring [MIT License](https://github.com/99designs/keyring/blob/master/LICENSE) - github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE) - github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE) @@ -142,7 +141,6 @@ following works: - github.com/gabriel-vasile/mimetype [MIT License](https://github.com/gabriel-vasile/mimetype/blob/master/LICENSE) - github.com/go-asn1-ber/asn1-ber [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) - github.com/go-chi/chi [MIT License](https://github.com/go-chi/chi/blob/master/LICENSE) -- github.com/go-darwin/apfs [BSD 3-Clause "New" or "Revised" License] (https://github.com/go-darwin/apfs/blob/main/LICENSE) - github.com/go-git/go-billy [Apache 
License 2.0](https://github.com/go-git/go-billy/blob/master/LICENSE) - github.com/go-ldap/ldap [MIT License](https://github.com/go-ldap/ldap/blob/v3.4.1/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) From 2b6bd9110048ccd50b605cf83b99b421abfb5a73 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 21 Nov 2024 11:28:05 +0800 Subject: [PATCH 006/170] feat(outputs): Fix error code style --- plugins/outputs/inlong/inlong.go | 14 +++++++------- plugins/outputs/inlong/inlong_test.go | 22 +++++++++------------- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index c6224bfbc5775..bdc4c79f76f13 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -15,10 +15,10 @@ import ( var sampleConfig string type Inlong struct { - GroupId string `toml:"group_id"` - StreamId string `toml:"stream_id"` - ManagerUrl string `toml:"manager_url"` - RetryIntervalMs int64 `toml:"retry_interval_ms"` + GroupID string `toml:"group_id"` + StreamID string `toml:"stream_id"` + ManagerURL string `toml:"manager_url"` + RetryIntervalMs int64 `toml:"retry_interval_ms"` Log telegraf.Logger `toml:"-"` producerFunc func(groupId string, managerUrl string) (dataproxy.Client, error) @@ -35,7 +35,7 @@ func (i *Inlong) SetSerializer(serializer serializers.Serializer) { } func (i *Inlong) Connect() error { - producer, err := i.producerFunc(i.GroupId, i.ManagerUrl) + producer, err := i.producerFunc(i.GroupID, i.ManagerURL) if err != nil { return &internal.StartupError{Err: err, Retry: true} } @@ -55,8 +55,8 @@ func (i *Inlong) Write(metrics []telegraf.Metric) error { return fmt.Errorf("could not serialize metric: %w", err) } err = i.producer.Send(context.Background(), dataproxy.Message{ - GroupID: i.GroupId, - StreamID: i.StreamId, + GroupID: i.GroupID, + StreamID: i.StreamID, Payload: b, }) if err != nil { diff --git 
a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index 110b90c6ec82b..e6508d2bb701a 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -12,31 +12,28 @@ import ( ) type MockProducer struct { - groupId string - managerUrl string + groupID string + managerURL string } -func (p *MockProducer) Send(ctx context.Context, msg dataproxy.Message) error { - +func (p *MockProducer) Send(context.Context, dataproxy.Message) error { return nil } -func (p *MockProducer) SendAsync(ctx context.Context, msg dataproxy.Message, callback dataproxy.Callback) { - return +func (p *MockProducer) SendAsync(context.Context, dataproxy.Message, dataproxy.Callback) { } func (p *MockProducer) Close() { - return } -func (p *MockProducer) SendMessage(ctx context.Context, msg dataproxy.Message) error { +func (p *MockProducer) SendMessage(context.Context, dataproxy.Message) error { return nil } -func NewMockProducer(groupId string, managerUrl string) (dataproxy.Client, error) { +func NewMockProducer(groupID string, managerURL string) (dataproxy.Client, error) { p := &MockProducer{} - p.groupId = groupId - p.managerUrl = managerUrl + p.groupID = groupID + p.managerURL = managerURL return p, nil } @@ -51,7 +48,7 @@ func TestInlong_Connect(t *testing.T) { func TestInlong_Write(t *testing.T) { s := &csv.Serializer{Header: true} - s.Init() + require.NoError(t, s.Init()) t.Run("", func(t *testing.T) { producer := &MockProducer{} i := &Inlong{ @@ -72,5 +69,4 @@ func TestInlong_Write(t *testing.T) { metrics = append(metrics, m) require.NoError(t, i.Write(metrics)) }) - } From 7bd148f307350a3320d30f7cd71512806d288285 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 21 Nov 2024 11:31:45 +0800 Subject: [PATCH 007/170] feat(outputs): Fix code fmt error --- plugins/outputs/inlong/inlong.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go 
index bdc4c79f76f13..0c4b9d12ae83f 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -15,10 +15,10 @@ import ( var sampleConfig string type Inlong struct { - GroupID string `toml:"group_id"` - StreamID string `toml:"stream_id"` - ManagerURL string `toml:"manager_url"` - RetryIntervalMs int64 `toml:"retry_interval_ms"` + GroupID string `toml:"group_id"` + StreamID string `toml:"stream_id"` + ManagerURL string `toml:"manager_url"` + RetryIntervalMs int64 `toml:"retry_interval_ms"` Log telegraf.Logger `toml:"-"` producerFunc func(groupId string, managerUrl string) (dataproxy.Client, error) From 7bb28921b80c58a6ebff1e2678ff629e9716c5ab Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 21 Nov 2024 12:33:33 +0800 Subject: [PATCH 008/170] feat(outputs): Fix code fmt error --- plugins/outputs/inlong/inlong_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index e6508d2bb701a..b52a50be60964 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -30,7 +30,7 @@ func (p *MockProducer) SendMessage(context.Context, dataproxy.Message) error { return nil } -func NewMockProducer(groupID string, managerURL string) (dataproxy.Client, error) { +func NewMockProducer(groupID, managerURL string) (dataproxy.Client, error) { p := &MockProducer{} p.groupID = groupID p.managerURL = managerURL From 3bdc83f0834282ad5f08d8ac4a9a5941115ce4ca Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 21 Nov 2024 12:45:06 +0800 Subject: [PATCH 009/170] feat(outputs): Fix error from test-go-linux-386 --- plugins/outputs/all/inlong.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/all/inlong.go b/plugins/outputs/all/inlong.go index 3e90277ea1e1c..2b3a360b709e1 100644 --- a/plugins/outputs/all/inlong.go +++ b/plugins/outputs/all/inlong.go @@ -1,4 +1,4 @@ -//go:build !custom || outputs || 
outputs.file +//go:build !custom || outputs || outputs.inlong package all From 3b39bdab626d76167443ef6d6b7cfb0230f3a811 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 21 Nov 2024 14:23:08 +0800 Subject: [PATCH 010/170] feat(outputs): Fix error from lint code base --- docs/LICENSE_OF_DEPENDENCIES.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 9c76f778ef648..ea7cdf17d143c 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -94,7 +94,7 @@ following works: - github.com/bmatcuk/doublestar [MIT License](https://github.com/bmatcuk/doublestar/blob/master/LICENSE) - github.com/boschrexroth/ctrlx-datalayer-golang [MIT License](https://github.com/boschrexroth/ctrlx-datalayer-golang/blob/main/LICENSE) - github.com/bufbuild/protocompile [Apache License 2.0](https://github.com/bufbuild/protocompile/blob/main/LICENSE) -- github.com/bwmarrin/snowflake [BSD 2-Clause "Simplified" License] (https://github.com/bwmarrin/snowflake/blob/master/LICENSE) +- github.com/bwmarrin/snowflake [BSD 2-Clause "Simplified" License](https://github.com/bwmarrin/snowflake/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) - github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) @@ -151,7 +151,7 @@ following works: - github.com/go-openapi/jsonreference [Apache License 2.0](https://github.com/go-openapi/jsonreference/blob/master/LICENSE) - github.com/go-openapi/swag [Apache License 2.0](https://github.com/go-openapi/swag/blob/master/LICENSE) - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) -- github.com/go-resty/resty [MIT License] 
(https://github.com/go-resty/resty/blob/v2/LICENSE) +- github.com/go-resty/resty [MIT License](https://github.com/go-resty/resty/blob/v2/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) - github.com/go-stack/stack [MIT License](https://github.com/go-stack/stack/blob/master/LICENSE.md) - github.com/go-stomp/stomp [Apache License 2.0](https://github.com/go-stomp/stomp/blob/master/LICENSE.txt) @@ -304,9 +304,9 @@ following works: - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) - github.com/opensearch-project/opensearch-go [Apache License 2.0](https://github.com/opensearch-project/opensearch-go/blob/main/LICENSE.txt) - github.com/opentracing/opentracing-go [Apache License 2.0](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) -- github.com/oxtoacart/bpool [Apache License 2.0] (https://github.com/oxtoacart/bpool/blob/master/LICENSE) +- github.com/oxtoacart/bpool [Apache License 2.0](https://github.com/oxtoacart/bpool/blob/master/LICENSE) - github.com/p4lang/p4runtime [Apache License 2.0](https://github.com/p4lang/p4runtime/blob/main/LICENSE) -- github.com/panjf2000/gnet [Apache License 2.0] (https://github.com/panjf2000/gnet/blob/dev/LICENSE) +- github.com/panjf2000/gnet [Apache License 2.0](https://github.com/panjf2000/gnet/blob/dev/LICENSE) - github.com/pborman/ansi [BSD 3-Clause "New" or "Revised" License](https://github.com/pborman/ansi/blob/master/LICENSE) - github.com/pcolladosoto/goslurm [MIT License](https://github.com/pcolladosoto/goslurm/blob/main/LICENSE) - github.com/peterbourgon/unixtransport [Apache License 2.0](https://github.com/peterbourgon/unixtransport/blob/main/LICENSE) @@ -373,7 +373,7 @@ following works: - github.com/uber/jaeger-client-go [Apache License 2.0](https://github.com/jaegertracing/jaeger-client-go/blob/master/LICENSE) - github.com/uber/jaeger-lib [Apache License 
2.0](https://github.com/jaegertracing/jaeger-lib/blob/main/LICENSE) - github.com/urfave/cli [MIT License](https://github.com/urfave/cli/blob/main/LICENSE) -- github.com/valyala/bytebufferpool [MIT License] (https://github.com/valyala/bytebufferpool/blob/master/LICENSE) +- github.com/valyala/bytebufferpool [MIT License](https://github.com/valyala/bytebufferpool/blob/master/LICENSE) - github.com/vapourismo/knx-go [MIT License](https://github.com/vapourismo/knx-go/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) @@ -392,7 +392,7 @@ following works: - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) - github.com/yusufpapurcu/wmi [MIT License](https://github.com/yusufpapurcu/wmi/blob/master/LICENSE) - github.com/zeebo/xxh3 [BSD 2-Clause "Simplified" License](https://github.com/zeebo/xxh3/blob/master/LICENSE) -- github.com/zentures/cityhash [MIT License] (https://github.com/zentures/cityhash/blob/master/LICENSE) +- github.com/zentures/cityhash [MIT License](https://github.com/zentures/cityhash/blob/master/LICENSE) - go.mongodb.org/mongo-driver [Apache License 2.0](https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - go.opentelemetry.io/collector/consumer [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) @@ -433,7 +433,7 @@ following works: - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) - gopkg.in/ini.v1 [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) -- 
gopkg.in/natefinch/lumberjack.v2 [MIT License] (https://github.com/natefinch/lumberjack/blob/v2.0/LICENSE) +- gopkg.in/natefinch/lumberjack.v2 [MIT License](https://github.com/natefinch/lumberjack/blob/v2.0/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) From 7541f2c402b60ce37e63a2d8eef01b4c6df1362e Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 21 Nov 2024 19:40:41 +0800 Subject: [PATCH 011/170] feat(outputs): Fix error from test in mac --- plugins/outputs/inlong/inlong_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index b52a50be60964..7ab7fbd85dfbf 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -58,7 +58,7 @@ func TestInlong_Write(t *testing.T) { m := metric.New( "cpu", map[string]string{ - "topic": "xyzzy", + "topic": "test-topic", }, map[string]interface{}{ "value": 42.0, From 6de0d4e7a1527bb2f784936be12adf60d7e75dd0 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Sun, 24 Nov 2024 22:44:42 +0800 Subject: [PATCH 012/170] Update plugins/outputs/inlong/inlong.go Co-authored-by: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> --- plugins/outputs/inlong/inlong.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index 0c4b9d12ae83f..26e20fb077157 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -4,7 +4,9 @@ import ( "context" _ "embed" "fmt" + "github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang/dataproxy" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" 
"github.com/influxdata/telegraf/plugins/outputs" From 899d975243a6761e76751b3c2758dfff55182571 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Sun, 24 Nov 2024 22:44:51 +0800 Subject: [PATCH 013/170] Update plugins/outputs/inlong/inlong_test.go Co-authored-by: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> --- plugins/outputs/inlong/inlong_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index 7ab7fbd85dfbf..17500ec2d1ead 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -2,13 +2,15 @@ package inlong import ( "context" + "testing" + "time" + "github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang/dataproxy" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/serializers/csv" - "github.com/stretchr/testify/require" - "testing" - "time" ) type MockProducer struct { From 480d75fd9b3bdf250cb49429f347e725805f2ed7 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Mon, 25 Nov 2024 10:32:36 +0800 Subject: [PATCH 014/170] feat(outputs): Make some changes based on the comments --- plugins/outputs/inlong/inlong.go | 25 ++++++++------- plugins/outputs/inlong/inlong_test.go | 46 ++++++++++++--------------- plugins/outputs/inlong/sample.conf | 3 -- 3 files changed, 33 insertions(+), 41 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index 26e20fb077157..ca0c74b7d77d9 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -20,7 +20,6 @@ type Inlong struct { GroupID string `toml:"group_id"` StreamID string `toml:"stream_id"` ManagerURL string `toml:"manager_url"` - RetryIntervalMs int64 `toml:"retry_interval_ms"` Log telegraf.Logger `toml:"-"` producerFunc func(groupId string, managerUrl string) 
(dataproxy.Client, error) @@ -71,17 +70,19 @@ func (i *Inlong) Write(metrics []telegraf.Metric) error { func init() { outputs.Add("inlong", func() telegraf.Output { return &Inlong{ - producerFunc: func(id string, url string) (dataproxy.Client, error) { - producer, err := dataproxy.NewClient( - dataproxy.WithGroupID(id), - dataproxy.WithURL(url), - ) - if err != nil { - fmt.Println(err) - return nil, err - } - return producer, nil - }, + producerFunc: NewProducer, } }) } + +func NewProducer(groupID, managerURL string) (dataproxy.Client, error) { + producer, err := dataproxy.NewClient( + dataproxy.WithGroupID(groupID), + dataproxy.WithURL(managerURL), + ) + if err != nil { + fmt.Println(err) + return nil, err + } + return producer, nil +} diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index 17500ec2d1ead..be0bef2462c70 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -40,35 +40,29 @@ func NewMockProducer(groupID, managerURL string) (dataproxy.Client, error) { } func TestInlong_Connect(t *testing.T) { - t.Run("", func(t *testing.T) { - i := &Inlong{ - producerFunc: NewMockProducer, - } - require.NoError(t, i.Connect()) - }) + i := &Inlong{producerFunc: NewMockProducer} + require.NoError(t, i.Connect()) } func TestInlong_Write(t *testing.T) { s := &csv.Serializer{Header: true} require.NoError(t, s.Init()) - t.Run("", func(t *testing.T) { - producer := &MockProducer{} - i := &Inlong{ - producer: producer, - serializer: s, - } - m := metric.New( - "cpu", - map[string]string{ - "topic": "test-topic", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ) - var metrics []telegraf.Metric - metrics = append(metrics, m) - require.NoError(t, i.Write(metrics)) - }) + producer := &MockProducer{} + i := &Inlong{ + producer: producer, + serializer: s, + } + m := metric.New( + "cpu", + map[string]string{ + "topic": "test-topic", + }, + map[string]interface{}{ + "value": 
42.0, + }, + time.Unix(0, 0), + ) + var metrics []telegraf.Metric + metrics = append(metrics, m) + require.NoError(t, i.Write(metrics)) } diff --git a/plugins/outputs/inlong/sample.conf b/plugins/outputs/inlong/sample.conf index b8828a73fcebe..dfef78212803b 100644 --- a/plugins/outputs/inlong/sample.conf +++ b/plugins/outputs/inlong/sample.conf @@ -10,9 +10,6 @@ ## The URL used to obtain the Inlong DataProxy IP list to which the data will be sent inlong_manager_url = "http://127.0.0.1:8083/inlong/manager/openapi/dataproxy/getIpList" - ## Retry interval, the interval to resend the message in the resend queue. - retry_interval_ms = 10 - ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: From ab9a6ceee00e80ddf7a76750303ea75f8ea7726a Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Mon, 25 Nov 2024 10:35:21 +0800 Subject: [PATCH 015/170] feat(outputs): Fix fmt error --- plugins/outputs/inlong/inlong.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index ca0c74b7d77d9..bd2e969d2ae35 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -17,10 +17,10 @@ import ( var sampleConfig string type Inlong struct { - GroupID string `toml:"group_id"` - StreamID string `toml:"stream_id"` - ManagerURL string `toml:"manager_url"` - Log telegraf.Logger `toml:"-"` + GroupID string `toml:"group_id"` + StreamID string `toml:"stream_id"` + ManagerURL string `toml:"manager_url"` + Log telegraf.Logger `toml:"-"` producerFunc func(groupId string, managerUrl string) (dataproxy.Client, error) producer dataproxy.Client From a0a2d0246814234f897ba1fdaf6e3c8af8a5ea6c Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Mon, 25 Nov 2024 12:27:12 +0800 Subject: [PATCH 016/170] feat(outputs): Fix config error --- plugins/outputs/inlong/README.md | 13 +++++-------- plugins/outputs/inlong/inlong.go | 4 
+++- plugins/outputs/inlong/sample.conf | 10 +++++----- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index a7a7d347eb4d3..fa2e01cf9e067 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -16,18 +16,15 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ```toml @sample.conf # Send telegraf metrics to Inlong [[outputs.inlong]] - ## From the inlong system, data Streams Group, it contains multiple data streams, and one Group represents + ## From the Inlong system, data streams group, it contains multiple data streams, and one Group represents ## one data business unit. - inlong_group_id = "test_group" + group_id = "test_group" - ## From the inlong system, data Stream, a stream has a specific data source, data format and data sink. - inlong_stream_id = "test_stream" - - ## Retry interval, the interval to resend the message in the resend queue. - retry_interval_ms = 10 + ## From the Inlong system, data stream, a stream has a specific data source, data format and data sink. + stream_id = "test_stream" ## The URL used to obtain the Inlong DataProxy IP list to which the data will be sent - inlong_manager_url = "http://127.0.0.1:8083/inlong/manager/openapi/dataproxy/getIpList" + manager_url = "http://127.0.0.1:8083/inlong/manager/openapi/dataproxy/getIpList" ## Data format to output. 
## Each data format has its own unique set of configuration options, read diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index bd2e969d2ae35..fa6a4e4e279f5 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -16,6 +16,8 @@ import ( //go:embed sample.conf var sampleConfig string +const ManagerUrlSuffix = "/inlong/manager/openapi/dataproxy/getIpList" + type Inlong struct { GroupID string `toml:"group_id"` StreamID string `toml:"stream_id"` @@ -36,7 +38,7 @@ func (i *Inlong) SetSerializer(serializer serializers.Serializer) { } func (i *Inlong) Connect() error { - producer, err := i.producerFunc(i.GroupID, i.ManagerURL) + producer, err := i.producerFunc(i.GroupID, i.ManagerURL+ManagerUrlSuffix) if err != nil { return &internal.StartupError{Err: err, Retry: true} } diff --git a/plugins/outputs/inlong/sample.conf b/plugins/outputs/inlong/sample.conf index dfef78212803b..bb5a781b60c2e 100644 --- a/plugins/outputs/inlong/sample.conf +++ b/plugins/outputs/inlong/sample.conf @@ -1,14 +1,14 @@ # Send telegraf metrics to Inlong [[outputs.inlong]] - ## From the inlong system, data Streams Group, it contains multiple data streams, and one Group represents + ## From the Inlong system, data streams group, it contains multiple data streams, and one Group represents ## one data business unit. - inlong_group_id = "test_group" + group_id = "test_group" - ## From the inlong system, data Stream, a stream has a specific data source, data format and data sink. - inlong_stream_id = "test_stream" + ## From the Inlong system, data stream, a stream has a specific data source, data format and data sink. + stream_id = "test_stream" ## The URL used to obtain the Inlong DataProxy IP list to which the data will be sent - inlong_manager_url = "http://127.0.0.1:8083/inlong/manager/openapi/dataproxy/getIpList" + manager_url = "http://127.0.0.1:8083" ## Data format to output. 
## Each data format has its own unique set of configuration options, read From 9b3dbf13cb5550eac5fe2b19767763e670d065e2 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Mon, 25 Nov 2024 12:41:14 +0800 Subject: [PATCH 017/170] feat(outputs): Fix fmt error --- plugins/outputs/inlong/inlong.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index fa6a4e4e279f5..004e1db835cd0 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -16,7 +16,7 @@ import ( //go:embed sample.conf var sampleConfig string -const ManagerUrlSuffix = "/inlong/manager/openapi/dataproxy/getIpList" +const ManagerURLSuffix = "/inlong/manager/openapi/dataproxy/getIpList" type Inlong struct { GroupID string `toml:"group_id"` @@ -38,7 +38,7 @@ func (i *Inlong) SetSerializer(serializer serializers.Serializer) { } func (i *Inlong) Connect() error { - producer, err := i.producerFunc(i.GroupID, i.ManagerURL+ManagerUrlSuffix) + producer, err := i.producerFunc(i.GroupID, i.ManagerURL+ManagerURLSuffix) if err != nil { return &internal.StartupError{Err: err, Retry: true} } From 2a983ad4bfa840930d0c2e17a2169813c0c4a2a3 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Mon, 25 Nov 2024 14:08:50 +0800 Subject: [PATCH 018/170] feat(outputs): Fix comments error --- plugins/outputs/inlong/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index fa2e01cf9e067..a377e9221508e 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -24,7 +24,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. stream_id = "test_stream" ## The URL used to obtain the Inlong DataProxy IP list to which the data will be sent - manager_url = "http://127.0.0.1:8083/inlong/manager/openapi/dataproxy/getIpList" + manager_url = "http://127.0.0.1:8083" ## Data format to output. 
## Each data format has its own unique set of configuration options, read From 25fb4494867e17bd5646e52d17e6a7faa44fb9d4 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Mon, 25 Nov 2024 15:14:56 +0800 Subject: [PATCH 019/170] feat(outputs): Improve annotations --- plugins/outputs/inlong/README.md | 7 ++++--- plugins/outputs/inlong/sample.conf | 10 ++++------ 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index a377e9221508e..24ce988111af6 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -30,14 +30,15 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - ## Suggest using CSV format here, as inlong is also processed in CSV format + ## Suggest using CSV format here, as Inlong is also processed in CSV format data_format = "csv" ## The delimiter used when serializing data in CSV format needs to be consistent with the delimiter - ## configured for inlong, so that the data can be parsed properly after it reaches inlong + ## configured for Inlong, so that the data can be parsed properly after it reaches Inlong. + ## It can be a space, vertical bar (|), comma (,), semicolon (;), asterisk (*), double quotes ("), etc. 
csv_separator = "|" ## The final output field order here needs to be consistent with the field order defined by the data - ## stream in inlong + ## stream in Inlong csv_columns = ["field.key","file.value"] ``` diff --git a/plugins/outputs/inlong/sample.conf b/plugins/outputs/inlong/sample.conf index bb5a781b60c2e..e32fd1a07b048 100644 --- a/plugins/outputs/inlong/sample.conf +++ b/plugins/outputs/inlong/sample.conf @@ -14,16 +14,14 @@ ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - ## Suggest using CSV format here, as inlong is also processed in CSV format + ## Suggest using CSV format here, as Inlong is also processed in CSV format data_format = "csv" ## The delimiter used when serializing data in CSV format needs to be consistent with the delimiter - ## configured for inlong, so that the data can be parsed properly after it reaches inlong + ## configured for Inlong, so that the data can be parsed properly after it reaches Inlong. + ## It can be a space, vertical bar (|), comma (,), semicolon (;), asterisk (*), double quotes ("), etc. 
csv_separator = "|" ## The final output field order here needs to be consistent with the field order defined by the data - ## stream in inlong + ## stream in Inlong csv_columns = ["field.key","file.value"] - - - From ffd7a904a8ef6604f207b011c138a1369c4762d9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:13:01 +0100 Subject: [PATCH 020/170] chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.42.2 to 1.43.1 (#16198) --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 7ecbfd5e18d30..17b76a7def7f8 100644 --- a/go.mod +++ b/go.mod @@ -44,18 +44,18 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/awnumar/memguard v0.22.5 - github.com/aws/aws-sdk-go-v2 v1.32.4 + github.com/aws/aws-sdk-go-v2 v1.32.5 github.com/aws/aws-sdk-go-v2/config v1.27.39 github.com/aws/aws-sdk-go-v2/credentials v1.17.44 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.43.1 github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.36.2 github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1 github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4 - github.com/aws/smithy-go v1.22.0 + github.com/aws/smithy-go v1.22.1 github.com/benbjohnson/clock v1.3.5 github.com/blues/jsonata-go v1.5.4 github.com/bmatcuk/doublestar/v3 v3.0.0 @@ -280,8 +280,8 @@ require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7 // indirect 
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1 // indirect diff --git a/go.sum b/go.sum index 5269eb144d935..724b35dab962f 100644 --- a/go.sum +++ b/go.sum @@ -858,8 +858,8 @@ github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAP github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.32.4 h1:S13INUiTxgrPueTmrm5DZ+MiAo99zYzHEFh1UNkOxNE= -github.com/aws/aws-sdk-go-v2 v1.32.4/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2 v1.32.5 h1:U8vdWJuY7ruAkzaOdD7guwJjD06YSKmnKCJs7s3IkIo= +github.com/aws/aws-sdk-go-v2 v1.32.5/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= @@ -881,19 +881,19 @@ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayx github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g= github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod 
h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 h1:A2w6m6Tmr+BNXjDsr7M90zkWjsu4JXHwrzPg235STs4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23/go.mod h1:35EVp9wyeANdujZruvHiQUAo9E3vbhnIO1mTCAxMlY0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 h1:4usbeaes3yJnCFC7kfeyhkdkPtoRYPa/hTmCqMpKpLI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24/go.mod h1:5CI1JemjVwde8m2WG3cz23qHKPOxbpkq0HaoreEgLIY= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 h1:pgYW9FCabt2M25MoHYCfMrVY2ghiiBKYWUVXfwZs+sU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23/go.mod h1:c48kLgzO19wAu3CPkDWC28JbaJ+hfQlsdl7I2+oqIbk= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 h1:N1zsICrQglfzaBnrfM0Ys00860C+QFwu6u/5+LomP+o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24/go.mod h1:dCn9HbJ8+K31i8IQ8EWmWj0EiIk0+vKiHNMxTTYveAg= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2 h1:eMh+iBTF1CbpHMfiRvIaVm+rzrH1DOzuSFaR55O+bBo= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2/go.mod 
h1:/A4zNqF1+RS5RV+NNLKIzUX1KtK5SoWgf/OpiqrwmBo= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.43.1 h1:FbjhJTRoTujDYDwTnnE46Km5Qh1mMSH+BwTL4ODFifg= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.43.1/go.mod h1:OwyCzHw6CH8pkLqT8uoCkOgUsgm11LTfexLZyRy6fBg= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0 h1:nawnkdqwinpBukRuDd+h0eURWHk67W4OInSJrD4NJsE= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0/go.mod h1:K27H8p8ZmsntKSSC8det8LuT5WahXoJ4vZqlWwKTRaM= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= @@ -940,8 +940,8 @@ github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= -github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= From 38df1817710bc3280fe46ec3afc2e855486689dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:15:18 +0100 Subject: [PATCH 021/170] chore(deps): Bump github.com/rclone/rclone from 1.68.1 to 1.68.2 (#16200) --- go.mod | 2 +- go.sum | 4 ++-- 2 
files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 17b76a7def7f8..ce17fb583f40a 100644 --- a/go.mod +++ b/go.mod @@ -169,7 +169,7 @@ require ( github.com/prometheus/procfs v0.15.1 github.com/prometheus/prometheus v0.54.1 github.com/rabbitmq/amqp091-go v1.10.0 - github.com/rclone/rclone v1.68.1 + github.com/rclone/rclone v1.68.2 github.com/redis/go-redis/v9 v9.6.1 github.com/riemann/riemann-go-client v0.5.1-0.20211206220514-f58f10cdce16 github.com/robbiet480/go.nut v0.0.0-20220219091450-bd8f121e1fa1 diff --git a/go.sum b/go.sum index 724b35dab962f..a690c688d2ed6 100644 --- a/go.sum +++ b/go.sum @@ -2129,8 +2129,8 @@ github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzX github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU= github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= -github.com/rclone/rclone v1.68.1 h1:vlEOAuPv4gGxWECM0NIaCwBNUt3ZQY7mCsyBtZjY+68= -github.com/rclone/rclone v1.68.1/go.mod h1:T8XKOt/2Fb9INROUtFH9eF9q9o9rI1W2qTrW2bw2cYU= +github.com/rclone/rclone v1.68.2 h1:0m2tKzfTnoZRhRseRFO3CsLa5ZCXYz3xWb98ke3dz98= +github.com/rclone/rclone v1.68.2/go.mod h1:DuhVHaYIVgIdtIg8vEVt/IBwyqPJUaarr/+nG8Zg+Fg= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= From 5f8e7f6366ac3cb558bdc69cd034480fd35fec29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:16:12 +0100 Subject: [PATCH 022/170] chore(deps): Bump github.com/intel/powertelemetry from 1.0.1 to 1.0.2 (#16201) 
Co-authored-by: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- plugins/inputs/intel_powerstat/README.md | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ce17fb583f40a..9e72b062d3672 100644 --- a/go.mod +++ b/go.mod @@ -120,7 +120,7 @@ require ( github.com/influxdata/tail v1.0.1-0.20241014115250-3e0015cb677a github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/intel/iaevents v1.1.0 - github.com/intel/powertelemetry v1.0.1 + github.com/intel/powertelemetry v1.0.2 github.com/jackc/pgconn v1.14.3 github.com/jackc/pgio v1.0.0 github.com/jackc/pgtype v1.14.4 diff --git a/go.sum b/go.sum index a690c688d2ed6..da3c93a13bd46 100644 --- a/go.sum +++ b/go.sum @@ -1594,8 +1594,8 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9s github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= github.com/intel/iaevents v1.1.0 h1:FzxMBfXk/apG2EUXUCfaq3gUQ+q+TgZ1HNMjjUILUGE= github.com/intel/iaevents v1.1.0/go.mod h1:CyUUzXw0lHRCsmyyF7Pwco9Y7NiTNQUUlcJ7RJAazKs= -github.com/intel/powertelemetry v1.0.1 h1:a35pZbqOnJlEYGEPXM+YKtetu6D6dJD4Jb4GS4Zetxs= -github.com/intel/powertelemetry v1.0.1/go.mod h1:f6pibcqhQyzN7FRwIXB4mAureaYZfJ+K8Gpm3y1gcrM= +github.com/intel/powertelemetry v1.0.2 h1:092xOflYu+YXzY3c/fQ2DpK1ePy9q9ulbm5yiNYrVkc= +github.com/intel/powertelemetry v1.0.2/go.mod h1:+PHKI9RElL7J1sTjgg3DGxtscD+IiLNmUzV1MOSCZt4= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md index ed4b6897b9420..b851df4cdeac5 100644 --- a/plugins/inputs/intel_powerstat/README.md +++ 
b/plugins/inputs/intel_powerstat/README.md @@ -452,6 +452,7 @@ powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c0_substate_c0_wa | 0x8F | Intel Sapphire Rapids X | ✓ | | | ✓ | | 0xCF | Intel Emerald Rapids X | ✓ | | | ✓ | | 0xAD | Intel Granite Rapids X | ✓ | | | | +| 0xAE | Intel Granite Rapids D | ✓ | | | | | 0x8A | Intel Lakefield | ✓ | | ✓ | | | 0x97 | Intel AlderLake | ✓ | | ✓ | ✓ | | 0x9A | Intel AlderLake-L | ✓ | | ✓ | ✓ | From d0de0626d6408f9889a3b1b9c2a4c72231980899 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:17:11 +0100 Subject: [PATCH 023/170] chore(deps): Bump super-linter/super-linter from 7.1.0 to 7.2.0 (#16203) --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 9512af781e471..9fd8d494f046a 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -54,7 +54,7 @@ jobs: # Run Linter against code base # ################################ - name: Lint Code Base - uses: super-linter/super-linter@v7.1.0 + uses: super-linter/super-linter@v7.2.0 env: VALIDATE_ALL_CODEBASE: false DEFAULT_BRANCH: master From f80910be166ede0cc1008c78cc441752439fd98d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:17:40 +0100 Subject: [PATCH 024/170] chore(deps): Bump modernc.org/sqlite from 1.33.1 to 1.34.1 (#16202) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9e72b062d3672..c73d28d3afbeb 100644 --- a/go.mod +++ b/go.mod @@ -234,7 +234,7 @@ require ( k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.30.1 layeh.com/radius v0.0.0-20221205141417-e7fbddd11d68 - modernc.org/sqlite v1.33.1 + modernc.org/sqlite v1.34.1 ) require ( diff --git a/go.sum b/go.sum index da3c93a13bd46..e2e73023c90e4 100644 --- 
a/go.sum +++ b/go.sum @@ -3417,8 +3417,8 @@ modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/sqlite v1.33.1 h1:trb6Z3YYoeM9eDL1O8do81kP+0ejv+YzgyFo+Gwy0nM= -modernc.org/sqlite v1.33.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= +modernc.org/sqlite v1.34.1 h1:u3Yi6M0N8t9yKRDwhXcyp1eS5/ErhPTBggxWFuR6Hfk= +modernc.org/sqlite v1.34.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= From 3dea61cb5c637629bbec1badd514f6ca707f67e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 25 Nov 2024 11:23:17 +0100 Subject: [PATCH 025/170] chore: Fix linter findings for `revive:exported` in `plugins/inputs/n*` (#16205) --- plugins/inputs/nats/nats.go | 4 +- plugins/inputs/nats_consumer/nats_consumer.go | 85 ++-- .../nats_consumer/nats_consumer_test.go | 28 +- plugins/inputs/neoom_beaam/neoom_beaam.go | 12 +- plugins/inputs/neptune_apex/neptune_apex.go | 14 +- .../inputs/neptune_apex/neptune_apex_test.go | 4 +- plugins/inputs/net/net.go | 20 +- plugins/inputs/net/net_test.go | 6 +- plugins/inputs/net_response/net_response.go | 216 +++++---- .../inputs/net_response/net_response_test.go | 10 +- plugins/inputs/netflow/netflow.go | 30 +- plugins/inputs/netflow/netflow_decoder.go | 34 +- plugins/inputs/netflow/netflow_v5.go | 4 +- plugins/inputs/netflow/sflow_v5.go | 18 +- plugins/inputs/netstat/netstat.go | 12 +- plugins/inputs/netstat/netstat_test.go | 2 +- plugins/inputs/nfsclient/nfsclient.go | 410 +++++++++--------- 
plugins/inputs/nginx/nginx.go | 4 +- plugins/inputs/nginx_plus/nginx_plus.go | 4 +- .../inputs/nginx_plus_api/nginx_plus_api.go | 18 +- plugins/inputs/nginx_sts/nginx_sts.go | 14 +- .../nginx_upstream_check.go | 94 ++-- .../nginx_upstream_check_test.go | 4 +- plugins/inputs/nginx_vts/nginx_vts.go | 18 +- plugins/inputs/nomad/nomad.go | 25 +- plugins/inputs/nomad/nomad_metrics.go | 1 + plugins/inputs/nsd/nsd.go | 88 ++-- plugins/inputs/nsd/nsd_test.go | 4 +- plugins/inputs/nsq/nsq.go | 28 +- plugins/inputs/nsq/nsq_test.go | 4 +- plugins/inputs/nsq_consumer/nsq_consumer.go | 70 ++- plugins/inputs/nstat/nstat.go | 34 +- plugins/inputs/ntpq/ntpq.go | 74 ++-- plugins/inputs/nvidia_smi/common/setters.go | 2 + .../inputs/nvidia_smi/schema_v11/parser.go | 1 + .../inputs/nvidia_smi/schema_v12/parser.go | 1 + 36 files changed, 689 insertions(+), 708 deletions(-) diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index ec0e5f6767dd4..43bd4fb30199c 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -23,8 +23,8 @@ import ( var sampleConfig string type Nats struct { - Server string - ResponseTimeout config.Duration + Server string `toml:"server"` + ResponseTimeout config.Duration `toml:"response_timeout"` client *http.Client } diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index fc20cb945d710..7904800499d89 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -19,27 +19,12 @@ import ( //go:embed sample.conf var sampleConfig string -var once sync.Once - var ( + once sync.Once defaultMaxUndeliveredMessages = 1000 ) -type empty struct{} -type semaphore chan empty - -type natsError struct { - conn *nats.Conn - sub *nats.Subscription - err error -} - -func (e natsError) Error() string { - return fmt.Sprintf("%s url:%s id:%s sub:%s queue:%s", - e.err.Error(), e.conn.ConnectedUrl(), e.conn.ConnectedServerId(), 
e.sub.Subject, e.sub.Queue) -} - -type natsConsumer struct { +type NatsConsumer struct { QueueGroup string `toml:"queue_group"` Subjects []string `toml:"subjects"` Servers []string `toml:"servers"` @@ -70,24 +55,32 @@ type natsConsumer struct { cancel context.CancelFunc } -func (*natsConsumer) SampleConfig() string { - return sampleConfig +type ( + empty struct{} + semaphore chan empty +) + +type natsError struct { + conn *nats.Conn + sub *nats.Subscription + err error } -func (n *natsConsumer) SetParser(parser telegraf.Parser) { - n.parser = parser +func (e natsError) Error() string { + return fmt.Sprintf("%s url:%s id:%s sub:%s queue:%s", + e.err.Error(), e.conn.ConnectedUrl(), e.conn.ConnectedServerId(), e.sub.Subject, e.sub.Queue) } -func (n *natsConsumer) natsErrHandler(c *nats.Conn, s *nats.Subscription, e error) { - select { - case n.errs <- natsError{conn: c, sub: s, err: e}: - default: - return - } +func (*NatsConsumer) SampleConfig() string { + return sampleConfig } -// Start the nats consumer. Caller must call *natsConsumer.Stop() to clean up. -func (n *natsConsumer) Start(acc telegraf.Accumulator) error { +func (n *NatsConsumer) SetParser(parser telegraf.Parser) { + n.parser = parser +} + +// Start the nats consumer. Caller must call *NatsConsumer.Stop() to clean up. +func (n *NatsConsumer) Start(acc telegraf.Accumulator) error { n.acc = acc.WithTracking(n.MaxUndeliveredMessages) options := []nats.Option{ @@ -193,9 +186,27 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { return nil } +func (n *NatsConsumer) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (n *NatsConsumer) Stop() { + n.cancel() + n.wg.Wait() + n.clean() +} + +func (n *NatsConsumer) natsErrHandler(c *nats.Conn, s *nats.Subscription, e error) { + select { + case n.errs <- natsError{conn: c, sub: s, err: e}: + default: + return + } +} + // receiver() reads all incoming messages from NATS, and parses them into // telegraf metrics. 
-func (n *natsConsumer) receiver(ctx context.Context) { +func (n *NatsConsumer) receiver(ctx context.Context) { sem := make(semaphore, n.MaxUndeliveredMessages) for { @@ -237,7 +248,7 @@ func (n *natsConsumer) receiver(ctx context.Context) { } } -func (n *natsConsumer) clean() { +func (n *NatsConsumer) clean() { for _, sub := range n.subs { if err := sub.Unsubscribe(); err != nil { n.Log.Errorf("Error unsubscribing from subject %s in queue %s: %s", @@ -257,19 +268,9 @@ func (n *natsConsumer) clean() { } } -func (n *natsConsumer) Stop() { - n.cancel() - n.wg.Wait() - n.clean() -} - -func (n *natsConsumer) Gather(_ telegraf.Accumulator) error { - return nil -} - func init() { inputs.Add("nats_consumer", func() telegraf.Input { - return &natsConsumer{ + return &NatsConsumer{ Servers: []string{"nats://localhost:4222"}, Subjects: []string{"telegraf"}, QueueGroup: "telegraf_consumers", diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go index e600f482148e8..27b1408747012 100644 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ b/plugins/inputs/nats_consumer/nats_consumer_test.go @@ -28,7 +28,7 @@ func TestStartStop(t *testing.T) { require.NoError(t, container.Start(), "failed to start container") defer container.Terminate() - plugin := &natsConsumer{ + plugin := &NatsConsumer{ Servers: []string{fmt.Sprintf("nats://%s:%s", container.Address, container.Ports["4222"])}, Subjects: []string{"telegraf"}, QueueGroup: "telegraf_consumers", @@ -140,7 +140,7 @@ func TestSendReceive(t *testing.T) { } // Setup the plugin - plugin := &natsConsumer{ + plugin := &NatsConsumer{ Servers: []string{addr}, Subjects: subjects, QueueGroup: "telegraf_consumers", @@ -161,15 +161,15 @@ func TestSendReceive(t *testing.T) { defer plugin.Stop() // Send all messages to the topics (random order due to Golang map) - publisher := &sender{Addr: addr} - require.NoError(t, publisher.Connect()) - defer publisher.Disconnect() + 
publisher := &sender{addr: addr} + require.NoError(t, publisher.connect()) + defer publisher.disconnect() for topic, msgs := range tt.msgs { for _, msg := range msgs { - require.NoError(t, publisher.Send(topic, msg)) + require.NoError(t, publisher.send(topic, msg)) } } - publisher.Disconnect() + publisher.disconnect() // Wait for the metrics to be collected require.Eventually(t, func() bool { @@ -185,16 +185,12 @@ func TestSendReceive(t *testing.T) { } type sender struct { - Addr string - - Username string - Password string - + addr string conn *nats.Conn } -func (s *sender) Connect() error { - conn, err := nats.Connect(s.Addr) +func (s *sender) connect() error { + conn, err := nats.Connect(s.addr) if err != nil { return err } @@ -203,7 +199,7 @@ func (s *sender) Connect() error { return nil } -func (s *sender) Disconnect() { +func (s *sender) disconnect() { if s.conn != nil && !s.conn.IsClosed() { _ = s.conn.Flush() s.conn.Close() @@ -211,6 +207,6 @@ func (s *sender) Disconnect() { s.conn = nil } -func (s *sender) Send(topic, msg string) error { +func (s *sender) send(topic, msg string) error { return s.conn.Publish(topic, []byte(msg)) } diff --git a/plugins/inputs/neoom_beaam/neoom_beaam.go b/plugins/inputs/neoom_beaam/neoom_beaam.go index 44752c94969c8..c61a2a87f535d 100644 --- a/plugins/inputs/neoom_beaam/neoom_beaam.go +++ b/plugins/inputs/neoom_beaam/neoom_beaam.go @@ -68,12 +68,6 @@ func (n *NeoomBeaam) Start(telegraf.Accumulator) error { return n.updateConfiguration() } -func (n *NeoomBeaam) Stop() { - if n.client != nil { - n.client.CloseIdleConnections() - } -} - func (n *NeoomBeaam) Gather(acc telegraf.Accumulator) error { // Refresh the config if requested if n.RefreshConfig { @@ -97,6 +91,12 @@ func (n *NeoomBeaam) Gather(acc telegraf.Accumulator) error { return nil } +func (n *NeoomBeaam) Stop() { + if n.client != nil { + n.client.CloseIdleConnections() + } +} + func (n *NeoomBeaam) updateConfiguration() error { endpoint := n.Address + 
"/api/v1/site/configuration" request, err := http.NewRequest("GET", endpoint, nil) diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index cba51fef9304d..d5485959177c7 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -27,6 +27,12 @@ var sampleConfig string // Measurement is constant across all metrics. const Measurement = "neptune_apex" +type NeptuneApex struct { + Servers []string `toml:"servers"` + ResponseTimeout config.Duration `toml:"response_timeout"` + httpClient *http.Client +} + type xmlReply struct { SoftwareVersion string `xml:"software,attr"` HardwareVersion string `xml:"hardware,attr"` @@ -54,18 +60,10 @@ type outlet struct { Xstatus *string `xml:"xstatus"` } -// NeptuneApex implements telegraf.Input. -type NeptuneApex struct { - Servers []string - ResponseTimeout config.Duration - httpClient *http.Client -} - func (*NeptuneApex) SampleConfig() string { return sampleConfig } -// Gather implements telegraf.Input.Gather func (n *NeptuneApex) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, server := range n.Servers { diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index 2f52bee7d7f12..a64374cd22bde 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -69,7 +69,7 @@ func TestParseXML(t *testing.T) { }{ { name: "Good test", - xmlResponse: []byte(APEX2016), + xmlResponse: []byte(apex2016), wantMetrics: []telegraf.Metric{ testutil.MustMetric( Measurement, @@ -532,7 +532,7 @@ func fakeHTTPClient(h http.Handler) (*http.Client, func()) { } // Sample configuration from a 2016 version Neptune Apex. 
-const APEX2016 = ` +const apex2016 = ` apex AC5:12345 diff --git a/plugins/inputs/net/net.go b/plugins/inputs/net/net.go index 03d770c75c210..a65f4af20122c 100644 --- a/plugins/inputs/net/net.go +++ b/plugins/inputs/net/net.go @@ -21,20 +21,20 @@ import ( //go:embed sample.conf var sampleConfig string -type NetIOStats struct { - filter filter.Filter - ps system.PS +type Net struct { + Interfaces []string `toml:"interfaces"` + IgnoreProtocolStats bool `toml:"ignore_protocol_stats"` - skipChecks bool - IgnoreProtocolStats bool - Interfaces []string + filter filter.Filter + ps system.PS + skipChecks bool } -func (*NetIOStats) SampleConfig() string { +func (*Net) SampleConfig() string { return sampleConfig } -func (n *NetIOStats) Init() error { +func (n *Net) Init() error { if !n.IgnoreProtocolStats { config.PrintOptionValueDeprecationNotice("inputs.net", "ignore_protocol_stats", "false", telegraf.DeprecationInfo{ @@ -48,7 +48,7 @@ func (n *NetIOStats) Init() error { return nil } -func (n *NetIOStats) Gather(acc telegraf.Accumulator) error { +func (n *Net) Gather(acc telegraf.Accumulator) error { netio, err := n.ps.NetIO() if err != nil { return fmt.Errorf("error getting net io info: %w", err) @@ -153,6 +153,6 @@ func getInterfaceSpeed(ioName string) int64 { func init() { inputs.Add("net", func() telegraf.Input { - return &NetIOStats{ps: system.NewSystemPS()} + return &Net{ps: system.NewSystemPS()} }) } diff --git a/plugins/inputs/net/net_test.go b/plugins/inputs/net/net_test.go index 2f8ff0ae7dec5..537943d38c87d 100644 --- a/plugins/inputs/net/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -44,7 +44,7 @@ func TestNetIOStats(t *testing.T) { t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")) - plugin := &NetIOStats{ps: &mps, skipChecks: true} + plugin := &Net{ps: &mps, skipChecks: true} var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -111,7 +111,7 @@ func TestNetIOStatsSpeedUnsupported(t *testing.T) { t.Setenv("HOST_SYS", 
filepath.Join("testdata", "general", "sys")) - plugin := &NetIOStats{ps: &mps, skipChecks: true} + plugin := &Net{ps: &mps, skipChecks: true} var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -178,7 +178,7 @@ func TestNetIOStatsNoSpeedFile(t *testing.T) { t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")) - plugin := &NetIOStats{ps: &mps, skipChecks: true} + plugin := &Net{ps: &mps, skipChecks: true} var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index f54f9b4629eb7..e51ff4db40e07 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -20,33 +20,101 @@ import ( //go:embed sample.conf var sampleConfig string -type ResultType uint64 +type resultType uint64 const ( - Success ResultType = 0 - Timeout ResultType = 1 - ConnectionFailed ResultType = 2 - ReadFailed ResultType = 3 - StringMismatch ResultType = 4 + success resultType = 0 + timeout resultType = 1 + connectionFailed resultType = 2 + readFailed resultType = 3 + stringMismatch resultType = 4 ) -// NetResponse struct type NetResponse struct { - Address string - Timeout config.Duration - ReadTimeout config.Duration - Send string - Expect string - Protocol string + Address string `toml:"address"` + Timeout config.Duration `toml:"timeout"` + ReadTimeout config.Duration `toml:"read_timeout"` + Send string `toml:"send"` + Expect string `toml:"expect"` + Protocol string `toml:"protocol"` } func (*NetResponse) SampleConfig() string { return sampleConfig } -// TCPGather will execute if there are TCP tests defined in the configuration. 
-// It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, error) { +func (n *NetResponse) Init() error { + // Set default values + if n.Timeout == 0 { + n.Timeout = config.Duration(time.Second) + } + if n.ReadTimeout == 0 { + n.ReadTimeout = config.Duration(time.Second) + } + // Check send and expected string + if n.Protocol == "udp" && n.Send == "" { + return errors.New("send string cannot be empty") + } + if n.Protocol == "udp" && n.Expect == "" { + return errors.New("expected string cannot be empty") + } + // Prepare host and port + host, port, err := net.SplitHostPort(n.Address) + if err != nil { + return err + } + if host == "" { + n.Address = "localhost:" + port + } + if port == "" { + return errors.New("bad port in config option address") + } + + if err := choice.Check(n.Protocol, []string{"tcp", "udp"}); err != nil { + return fmt.Errorf("config option protocol: %w", err) + } + + return nil +} + +func (n *NetResponse) Gather(acc telegraf.Accumulator) error { + // Prepare host and port + host, port, err := net.SplitHostPort(n.Address) + if err != nil { + return err + } + + // Prepare data + tags := map[string]string{"server": host, "port": port} + var fields map[string]interface{} + var returnTags map[string]string + + // Gather data + switch n.Protocol { + case "tcp": + returnTags, fields, err = n.tcpGather() + if err != nil { + return err + } + tags["protocol"] = "tcp" + case "udp": + returnTags, fields, err = n.udpGather() + if err != nil { + return err + } + tags["protocol"] = "udp" + } + + // Merge the tags + for k, v := range returnTags { + tags[k] = v + } + // Add metrics + acc.AddFields("net_response", fields, tags) + return nil +} + +func (n *NetResponse) tcpGather() (map[string]string, map[string]interface{}, error) { // Prepare returns tags := make(map[string]string) fields := make(map[string]interface{}) @@ -60,9 +128,9 @@ func (n 
*NetResponse) TCPGather() (map[string]string, map[string]interface{}, er if err != nil { var e net.Error if errors.As(err, &e) && e.Timeout() { - setResult(Timeout, fields, tags, n.Expect) + setResult(timeout, fields, tags, n.Expect) } else { - setResult(ConnectionFailed, fields, tags, n.Expect) + setResult(connectionFailed, fields, tags, n.Expect) } return tags, fields, nil } @@ -91,27 +159,25 @@ func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, er responseTime = time.Since(start).Seconds() // Handle error if err != nil { - setResult(ReadFailed, fields, tags, n.Expect) + setResult(readFailed, fields, tags, n.Expect) } else { // Looking for string in answer regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) find := regEx.FindString(data) if find != "" { - setResult(Success, fields, tags, n.Expect) + setResult(success, fields, tags, n.Expect) } else { - setResult(StringMismatch, fields, tags, n.Expect) + setResult(stringMismatch, fields, tags, n.Expect) } } } else { - setResult(Success, fields, tags, n.Expect) + setResult(success, fields, tags, n.Expect) } fields["response_time"] = responseTime return tags, fields, nil } -// UDPGather will execute if there are UDP tests defined in the configuration. 
-// It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, error) { +func (n *NetResponse) udpGather() (map[string]string, map[string]interface{}, error) { // Prepare returns tags := make(map[string]string) fields := make(map[string]interface{}) @@ -121,14 +187,14 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er udpAddr, err := net.ResolveUDPAddr("udp", n.Address) // Handle error if err != nil { - setResult(ConnectionFailed, fields, tags, n.Expect) + setResult(connectionFailed, fields, tags, n.Expect) return tags, fields, nil } // Connecting conn, err := net.DialUDP("udp", nil, udpAddr) // Handle error if err != nil { - setResult(ConnectionFailed, fields, tags, n.Expect) + setResult(connectionFailed, fields, tags, n.Expect) return tags, fields, nil } defer conn.Close() @@ -149,7 +215,7 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er responseTime := time.Since(start).Seconds() // Handle error if err != nil { - setResult(ReadFailed, fields, tags, n.Expect) + setResult(readFailed, fields, tags, n.Expect) return tags, fields, nil } @@ -157,9 +223,9 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) find := regEx.FindString(string(buf)) if find != "" { - setResult(Success, fields, tags, n.Expect) + setResult(success, fields, tags, n.Expect) } else { - setResult(StringMismatch, fields, tags, n.Expect) + setResult(stringMismatch, fields, tags, n.Expect) } fields["response_time"] = responseTime @@ -167,94 +233,18 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er return tags, fields, nil } -// Init performs one time setup of the plugin and returns an error if the -// configuration is invalid. 
-func (n *NetResponse) Init() error { - // Set default values - if n.Timeout == 0 { - n.Timeout = config.Duration(time.Second) - } - if n.ReadTimeout == 0 { - n.ReadTimeout = config.Duration(time.Second) - } - // Check send and expected string - if n.Protocol == "udp" && n.Send == "" { - return errors.New("send string cannot be empty") - } - if n.Protocol == "udp" && n.Expect == "" { - return errors.New("expected string cannot be empty") - } - // Prepare host and port - host, port, err := net.SplitHostPort(n.Address) - if err != nil { - return err - } - if host == "" { - n.Address = "localhost:" + port - } - if port == "" { - return errors.New("bad port in config option address") - } - - if err := choice.Check(n.Protocol, []string{"tcp", "udp"}); err != nil { - return fmt.Errorf("config option protocol: %w", err) - } - - return nil -} - -// Gather is called by telegraf when the plugin is executed on its interval. -// It will call either UDPGather or TCPGather based on the configuration and -// also fill an Accumulator that is supplied. 
-func (n *NetResponse) Gather(acc telegraf.Accumulator) error { - // Prepare host and port - host, port, err := net.SplitHostPort(n.Address) - if err != nil { - return err - } - - // Prepare data - tags := map[string]string{"server": host, "port": port} - var fields map[string]interface{} - var returnTags map[string]string - - // Gather data - switch n.Protocol { - case "tcp": - returnTags, fields, err = n.TCPGather() - if err != nil { - return err - } - tags["protocol"] = "tcp" - case "udp": - returnTags, fields, err = n.UDPGather() - if err != nil { - return err - } - tags["protocol"] = "udp" - } - - // Merge the tags - for k, v := range returnTags { - tags[k] = v - } - // Add metrics - acc.AddFields("net_response", fields, tags) - return nil -} - -func setResult(result ResultType, fields map[string]interface{}, tags map[string]string, expect string) { +func setResult(result resultType, fields map[string]interface{}, tags map[string]string, expect string) { var tag string switch result { - case Success: + case success: tag = "success" - case Timeout: + case timeout: tag = "timeout" - case ConnectionFailed: + case connectionFailed: tag = "connection_failed" - case ReadFailed: + case readFailed: tag = "read_failed" - case StringMismatch: + case stringMismatch: tag = "string_mismatch" } @@ -266,7 +256,7 @@ func setResult(result ResultType, fields map[string]interface{}, tags map[string // deprecated in 1.4; use result tag if expect != "" { - fields["string_found"] = result == Success + fields["string_found"] = result == success } } diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index bfb6c2ce803c5..8621d2ca68c96 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -106,7 +106,7 @@ func TestTCPOK1(t *testing.T) { require.NoError(t, c.Init()) // Start TCP server wg.Add(1) - go TCPServer(t, &wg) + go tcpServer(t, &wg) wg.Wait() // Wait 
for the server to spin up wg.Add(1) // Connect @@ -151,7 +151,7 @@ func TestTCPOK2(t *testing.T) { require.NoError(t, c.Init()) // Start TCP server wg.Add(1) - go TCPServer(t, &wg) + go tcpServer(t, &wg) wg.Wait() wg.Add(1) @@ -233,7 +233,7 @@ func TestUDPOK1(t *testing.T) { require.NoError(t, c.Init()) // Start UDP server wg.Add(1) - go UDPServer(t, &wg) + go udpServer(t, &wg) wg.Wait() wg.Add(1) @@ -264,7 +264,7 @@ func TestUDPOK1(t *testing.T) { wg.Wait() } -func UDPServer(t *testing.T, wg *sync.WaitGroup) { +func udpServer(t *testing.T, wg *sync.WaitGroup) { defer wg.Done() udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:2004") if err != nil { @@ -297,7 +297,7 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup) { } } -func TCPServer(t *testing.T, wg *sync.WaitGroup) { +func tcpServer(t *testing.T, wg *sync.WaitGroup) { defer wg.Done() tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2004") if err != nil { diff --git a/plugins/inputs/netflow/netflow.go b/plugins/inputs/netflow/netflow.go index ad0a351e5de67..218d9b5296dba 100644 --- a/plugins/inputs/netflow/netflow.go +++ b/plugins/inputs/netflow/netflow.go @@ -19,11 +19,6 @@ import ( //go:embed sample.conf var sampleConfig string -type protocolDecoder interface { - Init() error - Decode(net.IP, []byte) ([]telegraf.Metric, error) -} - type NetFlow struct { ServiceAddress string `toml:"service_address"` ReadBufferSize config.Size `toml:"read_buffer_size"` @@ -37,6 +32,11 @@ type NetFlow struct { wg sync.WaitGroup } +type protocolDecoder interface { + init() error + decode(net.IP, []byte) ([]telegraf.Metric, error) +} + func (*NetFlow) SampleConfig() string { return sampleConfig } @@ -61,12 +61,12 @@ func (n *NetFlow) Init() error { n.Log.Warn("'private_enterprise_number_files' option will be ignored in 'netflow v9'") } n.decoder = &netflowDecoder{ - Log: n.Log, + log: n.Log, } case "", "ipfix": n.decoder = &netflowDecoder{ - PENFiles: n.PENFiles, - Log: n.Log, + penFiles: n.PENFiles, + log: n.Log, } case 
"netflow v5": if len(n.PENFiles) != 0 { @@ -74,12 +74,12 @@ func (n *NetFlow) Init() error { } n.decoder = &netflowv5Decoder{} case "sflow", "sflow v5": - n.decoder = &sflowv5Decoder{Log: n.Log} + n.decoder = &sflowv5Decoder{log: n.Log} default: return fmt.Errorf("invalid protocol %q, only supports 'sflow', 'netflow v5', 'netflow v9' and 'ipfix'", n.Protocol) } - return n.decoder.Init() + return n.decoder.init() } func (n *NetFlow) Start(acc telegraf.Accumulator) error { @@ -114,6 +114,10 @@ func (n *NetFlow) Start(acc telegraf.Accumulator) error { return nil } +func (n *NetFlow) Gather(_ telegraf.Accumulator) error { + return nil +} + func (n *NetFlow) Stop() { if n.conn != nil { _ = n.conn.Close() @@ -138,7 +142,7 @@ func (n *NetFlow) read(acc telegraf.Accumulator) { if n.Log.Level().Includes(telegraf.Trace) || n.DumpPackets { // for backward compatibility n.Log.Tracef("raw data: %s", hex.EncodeToString(buf[:count])) } - metrics, err := n.decoder.Decode(src.IP, buf[:count]) + metrics, err := n.decoder.decode(src.IP, buf[:count]) if err != nil { errWithData := fmt.Errorf("%w; raw data: %s", err, hex.EncodeToString(buf[:count])) acc.AddError(errWithData) @@ -150,10 +154,6 @@ func (n *NetFlow) read(acc telegraf.Accumulator) { } } -func (n *NetFlow) Gather(_ telegraf.Accumulator) error { - return nil -} - // Register the plugin func init() { inputs.Add("netflow", func() telegraf.Input { diff --git a/plugins/inputs/netflow/netflow_decoder.go b/plugins/inputs/netflow/netflow_decoder.go index 8285db76318f0..aa6f40fc8305a 100644 --- a/plugins/inputs/netflow/netflow_decoder.go +++ b/plugins/inputs/netflow/netflow_decoder.go @@ -530,8 +530,8 @@ var fieldMappingsIPFIX = map[uint16][]fieldMapping{ // Decoder structure type netflowDecoder struct { - PENFiles []string - Log telegraf.Logger + penFiles []string + log telegraf.Logger templates map[string]netflow.NetFlowTemplateSystem mappingsV9 map[uint16]fieldMapping @@ -542,7 +542,7 @@ type netflowDecoder struct { sync.Mutex } 
-func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { +func (d *netflowDecoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { var metrics []telegraf.Metric t := time.Now() @@ -563,7 +563,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric if err := netflow.DecodeMessageVersion(buf, templates, &msg9, &msg10); err != nil { if errors.Is(err, netflow.ErrorTemplateNotFound) { msg := "Skipping packet until the device resends the required template..." - d.Log.Warnf("%v. %s", err, msg) + d.log.Warnf("%v. %s", err, msg) return nil, nil } return nil, fmt.Errorf("decoding message failed: %w", err) @@ -587,7 +587,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.ScopesValues { decodedFields, err := d.decodeValueV9(value) if err != nil { - d.Log.Errorf("decoding option record %+v failed: %v", record, err) + d.log.Errorf("decoding option record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -597,7 +597,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.OptionsValues { decodedFields, err := d.decodeValueV9(value) if err != nil { - d.Log.Errorf("decoding option record %+v failed: %v", record, err) + d.log.Errorf("decoding option record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -616,7 +616,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.Values { decodedFields, err := d.decodeValueV9(value) if err != nil { - d.Log.Errorf("decoding record %+v failed: %v", record, err) + d.log.Errorf("decoding record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -643,7 +643,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.ScopesValues { 
decodedFields, err := d.decodeValueIPFIX(value) if err != nil { - d.Log.Errorf("decoding option record %+v failed: %v", record, err) + d.log.Errorf("decoding option record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -653,7 +653,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.OptionsValues { decodedFields, err := d.decodeValueIPFIX(value) if err != nil { - d.Log.Errorf("decoding option record %+v failed: %v", record, err) + d.log.Errorf("decoding option record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -673,7 +673,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.Values { decodedFields, err := d.decodeValueIPFIX(value) if err != nil { - d.Log.Errorf("decoding value %+v failed: %v", value, err) + d.log.Errorf("decoding value %+v failed: %v", value, err) continue } for _, field := range decodedFields { @@ -691,7 +691,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric return metrics, nil } -func (d *netflowDecoder) Init() error { +func (d *netflowDecoder) init() error { if err := initL4ProtoMapping(); err != nil { return fmt.Errorf("initializing layer 4 protocol mapping failed: %w", err) } @@ -703,8 +703,8 @@ func (d *netflowDecoder) Init() error { d.mappingsV9 = make(map[uint16]fieldMapping) d.mappingsIPFIX = make(map[uint16]fieldMapping) d.mappingsPEN = make(map[string]fieldMapping) - for _, fn := range d.PENFiles { - d.Log.Debugf("Loading PEN mapping file %q...", fn) + for _, fn := range d.penFiles { + d.log.Debugf("Loading PEN mapping file %q...", fn) mappings, err := loadMapping(fn) if err != nil { return err @@ -719,7 +719,7 @@ func (d *netflowDecoder) Init() error { d.mappingsPEN[k] = v } } - d.Log.Infof("Loaded %d PEN mappings...", len(d.mappingsPEN)) + d.log.Infof("Loaded %d PEN mappings...", len(d.mappingsPEN)) d.logged 
= make(map[string]bool) @@ -783,7 +783,7 @@ func (d *netflowDecoder) decodeValueV9(field netflow.DataField) ([]telegraf.Fiel // Return the raw data if no mapping was found key := fmt.Sprintf("type_%d", elementID) if !d.logged[key] { - d.Log.Debugf("unknown Netflow v9 data field %v", field) + d.log.Debugf("unknown Netflow v9 data field %v", field) d.logged[key] = true } v, err := decodeHex(raw) @@ -817,7 +817,7 @@ func (d *netflowDecoder) decodeValueIPFIX(field netflow.DataField) ([]telegraf.F return []telegraf.Field{{Key: name, Value: v}}, nil } if !d.logged[key] { - d.Log.Debugf("unknown IPFIX PEN data field %v", field) + d.log.Debugf("unknown IPFIX PEN data field %v", field) d.logged[key] = true } name := fmt.Sprintf("type_%d_%s%d", field.Pen, prefix, elementID) @@ -866,7 +866,7 @@ func (d *netflowDecoder) decodeValueIPFIX(field netflow.DataField) ([]telegraf.F // Return the raw data if no mapping was found key := fmt.Sprintf("type_%d", elementID) if !d.logged[key] { - d.Log.Debugf("unknown IPFIX data field %v", field) + d.log.Debugf("unknown IPFIX data field %v", field) d.logged[key] = true } v, err := decodeHex(raw) diff --git a/plugins/inputs/netflow/netflow_v5.go b/plugins/inputs/netflow/netflow_v5.go index ee3e9d2c3b662..839a1d0943598 100644 --- a/plugins/inputs/netflow/netflow_v5.go +++ b/plugins/inputs/netflow/netflow_v5.go @@ -15,14 +15,14 @@ import ( // Decoder structure type netflowv5Decoder struct{} -func (d *netflowv5Decoder) Init() error { +func (d *netflowv5Decoder) init() error { if err := initL4ProtoMapping(); err != nil { return fmt.Errorf("initializing layer 4 protocol mapping failed: %w", err) } return nil } -func (d *netflowv5Decoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { +func (d *netflowv5Decoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { src := srcIP.String() // Decode the message diff --git a/plugins/inputs/netflow/sflow_v5.go b/plugins/inputs/netflow/sflow_v5.go index 
4d7a773b7d654..6e43680f3a597 100644 --- a/plugins/inputs/netflow/sflow_v5.go +++ b/plugins/inputs/netflow/sflow_v5.go @@ -19,13 +19,13 @@ import ( // Decoder structure type sflowv5Decoder struct { - Log telegraf.Logger + log telegraf.Logger warnedCounterRaw map[uint32]bool warnedFlowRaw map[int64]bool } -func (d *sflowv5Decoder) Init() error { +func (d *sflowv5Decoder) init() error { if err := initL4ProtoMapping(); err != nil { return fmt.Errorf("initializing layer 4 protocol mapping failed: %w", err) } @@ -35,7 +35,7 @@ func (d *sflowv5Decoder) Init() error { return nil } -func (d *sflowv5Decoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { +func (d *sflowv5Decoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { t := time.Now() src := srcIP.String() @@ -448,11 +448,11 @@ func (d *sflowv5Decoder) decodeRawHeaderSample(record *sflow.SampledHeader) (map if !d.warnedFlowRaw[ltype] { contents := hex.EncodeToString(pkt.LayerContents()) payload := hex.EncodeToString(pkt.LayerPayload()) - d.Log.Warnf("Unknown flow raw flow message %s (%d):", pkt.LayerType().String(), pkt.LayerType()) - d.Log.Warnf(" contents: %s", contents) - d.Log.Warnf(" payload: %s", payload) + d.log.Warnf("Unknown flow raw flow message %s (%d):", pkt.LayerType().String(), pkt.LayerType()) + d.log.Warnf(" contents: %s", contents) + d.log.Warnf(" payload: %s", payload) - d.Log.Warn("This message is only printed once.") + d.log.Warn("This message is only printed once.") } d.warnedFlowRaw[ltype] = true } @@ -524,8 +524,8 @@ func (d *sflowv5Decoder) decodeCounterRecords(records []sflow.CounterRecord) (ma default: if !d.warnedCounterRaw[r.Header.DataFormat] { data := hex.EncodeToString(record.Data) - d.Log.Warnf("Unknown counter raw flow message %d: %s", r.Header.DataFormat, data) - d.Log.Warn("This message is only printed once.") + d.log.Warnf("Unknown counter raw flow message %d: %s", r.Header.DataFormat, data) + d.log.Warn("This message is only printed once.") } 
d.warnedCounterRaw[r.Header.DataFormat] = true } diff --git a/plugins/inputs/netstat/netstat.go b/plugins/inputs/netstat/netstat.go index 18da7083cdaea..febdfa50d3bfb 100644 --- a/plugins/inputs/netstat/netstat.go +++ b/plugins/inputs/netstat/netstat.go @@ -14,16 +14,16 @@ import ( //go:embed sample.conf var sampleConfig string -type NetStats struct { - PS system.PS +type NetStat struct { + ps system.PS } -func (*NetStats) SampleConfig() string { +func (*NetStat) SampleConfig() string { return sampleConfig } -func (ns *NetStats) Gather(acc telegraf.Accumulator) error { - netconns, err := ns.PS.NetConnections() +func (ns *NetStat) Gather(acc telegraf.Accumulator) error { + netconns, err := ns.ps.NetConnections() if err != nil { return fmt.Errorf("error getting net connections info: %w", err) } @@ -66,6 +66,6 @@ func (ns *NetStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("netstat", func() telegraf.Input { - return &NetStats{PS: system.NewSystemPS()} + return &NetStat{ps: system.NewSystemPS()} }) } diff --git a/plugins/inputs/netstat/netstat_test.go b/plugins/inputs/netstat/netstat_test.go index 05cc4b227eb44..5ae6e48151382 100644 --- a/plugins/inputs/netstat/netstat_test.go +++ b/plugins/inputs/netstat/netstat_test.go @@ -32,7 +32,7 @@ func TestNetStats(t *testing.T) { }, nil) var acc testutil.Accumulator - require.NoError(t, (&NetStats{PS: &mps}).Gather(&acc)) + require.NoError(t, (&NetStat{ps: &mps}).Gather(&acc)) expected := []telegraf.Metric{ metric.New( diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go index 3f1f71835b659..e496fe476e7e2 100644 --- a/plugins/inputs/nfsclient/nfsclient.go +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -31,36 +31,193 @@ type NFSClient struct { mountstatsPath string } -func convertToUint64(line []string) ([]uint64, error) { - /* A "line" of input data (a pre-split array of strings) is - processed one field at a time. 
Each field is converted to - an uint64 value, and appended to an array of return values. - On an error, check for ErrRange, and returns an error - if found. This situation indicates a pretty major issue in - the /proc/self/mountstats file, and returning faulty data - is worse than no data. Other errors are ignored, and append - whatever we got in the first place (probably 0). - Yes, this is ugly. */ +func (*NFSClient) SampleConfig() string { + return sampleConfig +} - if len(line) < 2 { - return nil, nil +func (n *NFSClient) Init() error { + var nfs3Fields = []string{ + "NULL", + "GETATTR", + "SETATTR", + "LOOKUP", + "ACCESS", + "READLINK", + "READ", + "WRITE", + "CREATE", + "MKDIR", + "SYMLINK", + "MKNOD", + "REMOVE", + "RMDIR", + "RENAME", + "LINK", + "READDIR", + "READDIRPLUS", + "FSSTAT", + "FSINFO", + "PATHCONF", + "COMMIT", } - nline := make([]uint64, 0, len(line[1:])) - // Skip the first field; it's handled specially as the "first" variable - for _, l := range line[1:] { - val, err := strconv.ParseUint(l, 10, 64) - if err != nil { - var numError *strconv.NumError - if errors.As(err, &numError) { - if errors.Is(numError.Err, strconv.ErrRange) { - return nil, fmt.Errorf("errrange: line:[%v] raw:[%v] -> parsed:[%v]", line, l, val) - } + var nfs4Fields = []string{ + "NULL", + "READ", + "WRITE", + "COMMIT", + "OPEN", + "OPEN_CONFIRM", + "OPEN_NOATTR", + "OPEN_DOWNGRADE", + "CLOSE", + "SETATTR", + "FSINFO", + "RENEW", + "SETCLIENTID", + "SETCLIENTID_CONFIRM", + "LOCK", + "LOCKT", + "LOCKU", + "ACCESS", + "GETATTR", + "LOOKUP", + "LOOKUP_ROOT", + "REMOVE", + "RENAME", + "LINK", + "SYMLINK", + "CREATE", + "PATHCONF", + "STATFS", + "READLINK", + "READDIR", + "SERVER_CAPS", + "DELEGRETURN", + "GETACL", + "SETACL", + "FS_LOCATIONS", + "RELEASE_LOCKOWNER", + "SECINFO", + "FSID_PRESENT", + "EXCHANGE_ID", + "CREATE_SESSION", + "DESTROY_SESSION", + "SEQUENCE", + "GET_LEASE_TIME", + "RECLAIM_COMPLETE", + "LAYOUTGET", + "GETDEVICEINFO", + "LAYOUTCOMMIT", + "LAYOUTRETURN", + 
"SECINFO_NO_NAME", + "TEST_STATEID", + "FREE_STATEID", + "GETDEVICELIST", + "BIND_CONN_TO_SESSION", + "DESTROY_CLIENTID", + "SEEK", + "ALLOCATE", + "DEALLOCATE", + "LAYOUTSTATS", + "CLONE", + "COPY", + "OFFLOAD_CANCEL", + "LOOKUPP", + "LAYOUTERROR", + "COPY_NOTIFY", + "GETXATTR", + "SETXATTR", + "LISTXATTRS", + "REMOVEXATTR", + } + + nfs3Ops := make(map[string]bool) + nfs4Ops := make(map[string]bool) + + n.mountstatsPath = n.getMountStatsPath() + + if len(n.IncludeOperations) == 0 { + for _, Op := range nfs3Fields { + nfs3Ops[Op] = true + } + for _, Op := range nfs4Fields { + nfs4Ops[Op] = true + } + } else { + for _, Op := range n.IncludeOperations { + nfs3Ops[Op] = true + } + for _, Op := range n.IncludeOperations { + nfs4Ops[Op] = true + } + } + + if len(n.ExcludeOperations) > 0 { + for _, Op := range n.ExcludeOperations { + if nfs3Ops[Op] { + delete(nfs3Ops, Op) + } + if nfs4Ops[Op] { + delete(nfs4Ops, Op) } } - nline = append(nline, val) } - return nline, nil + + n.nfs3Ops = nfs3Ops + n.nfs4Ops = nfs4Ops + + if len(n.IncludeMounts) > 0 { + n.Log.Debugf("Including these mount patterns: %v", n.IncludeMounts) + } else { + n.Log.Debugf("Including all mounts.") + } + + if len(n.ExcludeMounts) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeMounts) + } else { + n.Log.Debugf("Not excluding any mounts.") + } + + if len(n.IncludeOperations) > 0 { + n.Log.Debugf("Including these operations: %v", n.IncludeOperations) + } else { + n.Log.Debugf("Including all operations.") + } + + if len(n.ExcludeOperations) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeOperations) + } else { + n.Log.Debugf("Not excluding any operations.") + } + + return nil +} + +func (n *NFSClient) Gather(acc telegraf.Accumulator) error { + if _, err := os.Stat(n.mountstatsPath); os.IsNotExist(err) { + return err + } + + // Attempt to read the file to see if we have permissions before opening + // which can lead to a panic + if _, err := 
os.ReadFile(n.mountstatsPath); err != nil { + return err + } + + file, err := os.Open(n.mountstatsPath) + if err != nil { + n.Log.Errorf("Failed opening the %q file: %v ", file.Name(), err) + return err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if err := n.processText(scanner, acc); err != nil { + return err + } + + return scanner.Err() } func (n *NFSClient) parseStat(mountpoint, export, version string, line []string, acc telegraf.Accumulator) error { @@ -291,193 +448,36 @@ func (n *NFSClient) getMountStatsPath() string { return path } -func (*NFSClient) SampleConfig() string { - return sampleConfig -} - -func (n *NFSClient) Gather(acc telegraf.Accumulator) error { - if _, err := os.Stat(n.mountstatsPath); os.IsNotExist(err) { - return err - } - - // Attempt to read the file to see if we have permissions before opening - // which can lead to a panic - if _, err := os.ReadFile(n.mountstatsPath); err != nil { - return err - } - - file, err := os.Open(n.mountstatsPath) - if err != nil { - n.Log.Errorf("Failed opening the %q file: %v ", file.Name(), err) - return err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - if err := n.processText(scanner, acc); err != nil { - return err - } - - return scanner.Err() -} - -func (n *NFSClient) Init() error { - var nfs3Fields = []string{ - "NULL", - "GETATTR", - "SETATTR", - "LOOKUP", - "ACCESS", - "READLINK", - "READ", - "WRITE", - "CREATE", - "MKDIR", - "SYMLINK", - "MKNOD", - "REMOVE", - "RMDIR", - "RENAME", - "LINK", - "READDIR", - "READDIRPLUS", - "FSSTAT", - "FSINFO", - "PATHCONF", - "COMMIT", - } - - var nfs4Fields = []string{ - "NULL", - "READ", - "WRITE", - "COMMIT", - "OPEN", - "OPEN_CONFIRM", - "OPEN_NOATTR", - "OPEN_DOWNGRADE", - "CLOSE", - "SETATTR", - "FSINFO", - "RENEW", - "SETCLIENTID", - "SETCLIENTID_CONFIRM", - "LOCK", - "LOCKT", - "LOCKU", - "ACCESS", - "GETATTR", - "LOOKUP", - "LOOKUP_ROOT", - "REMOVE", - "RENAME", - "LINK", - "SYMLINK", - "CREATE", - "PATHCONF", - 
"STATFS", - "READLINK", - "READDIR", - "SERVER_CAPS", - "DELEGRETURN", - "GETACL", - "SETACL", - "FS_LOCATIONS", - "RELEASE_LOCKOWNER", - "SECINFO", - "FSID_PRESENT", - "EXCHANGE_ID", - "CREATE_SESSION", - "DESTROY_SESSION", - "SEQUENCE", - "GET_LEASE_TIME", - "RECLAIM_COMPLETE", - "LAYOUTGET", - "GETDEVICEINFO", - "LAYOUTCOMMIT", - "LAYOUTRETURN", - "SECINFO_NO_NAME", - "TEST_STATEID", - "FREE_STATEID", - "GETDEVICELIST", - "BIND_CONN_TO_SESSION", - "DESTROY_CLIENTID", - "SEEK", - "ALLOCATE", - "DEALLOCATE", - "LAYOUTSTATS", - "CLONE", - "COPY", - "OFFLOAD_CANCEL", - "LOOKUPP", - "LAYOUTERROR", - "COPY_NOTIFY", - "GETXATTR", - "SETXATTR", - "LISTXATTRS", - "REMOVEXATTR", - } - - nfs3Ops := make(map[string]bool) - nfs4Ops := make(map[string]bool) - - n.mountstatsPath = n.getMountStatsPath() +func convertToUint64(line []string) ([]uint64, error) { + /* A "line" of input data (a pre-split array of strings) is + processed one field at a time. Each field is converted to + an uint64 value, and appended to an array of return values. + On an error, check for ErrRange, and returns an error + if found. This situation indicates a pretty major issue in + the /proc/self/mountstats file, and returning faulty data + is worse than no data. Other errors are ignored, and append + whatever we got in the first place (probably 0). + Yes, this is ugly. 
*/ - if len(n.IncludeOperations) == 0 { - for _, Op := range nfs3Fields { - nfs3Ops[Op] = true - } - for _, Op := range nfs4Fields { - nfs4Ops[Op] = true - } - } else { - for _, Op := range n.IncludeOperations { - nfs3Ops[Op] = true - } - for _, Op := range n.IncludeOperations { - nfs4Ops[Op] = true - } + if len(line) < 2 { + return nil, nil } - if len(n.ExcludeOperations) > 0 { - for _, Op := range n.ExcludeOperations { - if nfs3Ops[Op] { - delete(nfs3Ops, Op) - } - if nfs4Ops[Op] { - delete(nfs4Ops, Op) + nline := make([]uint64, 0, len(line[1:])) + // Skip the first field; it's handled specially as the "first" variable + for _, l := range line[1:] { + val, err := strconv.ParseUint(l, 10, 64) + if err != nil { + var numError *strconv.NumError + if errors.As(err, &numError) { + if errors.Is(numError.Err, strconv.ErrRange) { + return nil, fmt.Errorf("errrange: line:[%v] raw:[%v] -> parsed:[%v]", line, l, val) + } } } + nline = append(nline, val) } - - n.nfs3Ops = nfs3Ops - n.nfs4Ops = nfs4Ops - - if len(n.IncludeMounts) > 0 { - n.Log.Debugf("Including these mount patterns: %v", n.IncludeMounts) - } else { - n.Log.Debugf("Including all mounts.") - } - - if len(n.ExcludeMounts) > 0 { - n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeMounts) - } else { - n.Log.Debugf("Not excluding any mounts.") - } - - if len(n.IncludeOperations) > 0 { - n.Log.Debugf("Including these operations: %v", n.IncludeOperations) - } else { - n.Log.Debugf("Including all operations.") - } - - if len(n.ExcludeOperations) > 0 { - n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeOperations) - } else { - n.Log.Debugf("Not excluding any operations.") - } - - return nil + return nline, nil } func init() { diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index bf6909208dcfb..0d20653cff546 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -23,8 +23,8 @@ import ( var sampleConfig string type Nginx struct { - Urls []string - 
ResponseTimeout config.Duration + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig // HTTP client diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index ed9b450acd277..2f32161368404 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -276,11 +276,11 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum if err := dec.Decode(status); err != nil { return errors.New("error while decoding JSON response") } - status.Gather(tags, acc) + status.gather(tags, acc) return nil } -func (s *status) Gather(tags map[string]string, acc telegraf.Accumulator) { +func (s *status) gather(tags map[string]string, acc telegraf.Accumulator) { s.gatherProcessesMetrics(tags, acc) s.gatherConnectionsMetrics(tags, acc) s.gatherSslMetrics(tags, acc) diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index acf3be64a57f4..02dbba3516690 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -18,15 +18,6 @@ import ( //go:embed sample.conf var sampleConfig string -type NginxPlusAPI struct { - Urls []string `toml:"urls"` - APIVersion int64 `toml:"api_version"` - ResponseTimeout config.Duration `toml:"response_timeout"` - tls.ClientConfig - - client *http.Client -} - const ( // Default settings defaultAPIVersion = 3 @@ -49,6 +40,15 @@ const ( streamUpstreamsPath = "stream/upstreams" ) +type NginxPlusAPI struct { + Urls []string `toml:"urls"` + APIVersion int64 `toml:"api_version"` + ResponseTimeout config.Duration `toml:"response_timeout"` + tls.ClientConfig + + client *http.Client +} + func (*NginxPlusAPI) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/nginx_sts/nginx_sts.go b/plugins/inputs/nginx_sts/nginx_sts.go index 75dddeb9b9481..18c7bba4eedea 100644 --- 
a/plugins/inputs/nginx_sts/nginx_sts.go +++ b/plugins/inputs/nginx_sts/nginx_sts.go @@ -106,7 +106,7 @@ func (n *NginxSTS) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } } -type NginxSTSResponse struct { +type nginxSTSResponse struct { Connections struct { Active uint64 `json:"active"` Reading uint64 `json:"reading"` @@ -117,12 +117,12 @@ type NginxSTSResponse struct { Requests uint64 `json:"requests"` } `json:"connections"` Hostname string `json:"hostName"` - StreamFilterZones map[string]map[string]Server `json:"streamFilterZones"` - StreamServerZones map[string]Server `json:"streamServerZones"` - StreamUpstreamZones map[string][]Upstream `json:"streamUpstreamZones"` + StreamFilterZones map[string]map[string]server `json:"streamFilterZones"` + StreamServerZones map[string]server `json:"streamServerZones"` + StreamUpstreamZones map[string][]upstream `json:"streamUpstreamZones"` } -type Server struct { +type server struct { ConnectCounter uint64 `json:"connectCounter"` InBytes uint64 `json:"inBytes"` OutBytes uint64 `json:"outBytes"` @@ -137,7 +137,7 @@ type Server struct { } `json:"responses"` } -type Upstream struct { +type upstream struct { Server string `json:"server"` ConnectCounter uint64 `json:"connectCounter"` InBytes uint64 `json:"inBytes"` @@ -166,7 +166,7 @@ type Upstream struct { func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { dec := json.NewDecoder(r) - status := &NginxSTSResponse{} + status := &nginxSTSResponse{} if err := dec.Decode(status); err != nil { return errors.New("error while decoding JSON response") } diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 46a142878a779..c1d02e5cae9f9 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -34,31 +34,15 @@ type NginxUpstreamCheck struct { client *http.Client } 
-func NewNginxUpstreamCheck() *NginxUpstreamCheck { - return &NginxUpstreamCheck{ - URL: "http://127.0.0.1/status?format=json", - Method: "GET", - Headers: make(map[string]string), - HostHeader: "", - Timeout: config.Duration(time.Second * 5), - } -} - -func init() { - inputs.Add("nginx_upstream_check", func() telegraf.Input { - return NewNginxUpstreamCheck() - }) -} - -type NginxUpstreamCheckData struct { +type nginxUpstreamCheckData struct { Servers struct { Total uint64 `json:"total"` Generation uint64 `json:"generation"` - Server []NginxUpstreamCheckServer `json:"server"` + Server []nginxUpstreamCheckServer `json:"server"` } `json:"servers"` } -type NginxUpstreamCheckServer struct { +type nginxUpstreamCheckServer struct { Index uint64 `json:"index"` Upstream string `json:"upstream"` Name string `json:"name"` @@ -69,6 +53,33 @@ type NginxUpstreamCheckServer struct { Port uint16 `json:"port"` } +func (*NginxUpstreamCheck) SampleConfig() string { + return sampleConfig +} + +func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error { + if check.client == nil { + client, err := check.createHTTPClient() + + if err != nil { + return err + } + check.client = client + } + + statusURL, err := url.Parse(check.URL) + if err != nil { + return err + } + + err = check.gatherStatusData(statusURL.String(), accumulator) + if err != nil { + return err + } + + return nil +} + // createHTTPClient create a clients to access API func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { tlsConfig, err := check.ClientConfig.TLSConfig() @@ -130,35 +141,8 @@ func (check *NginxUpstreamCheck) gatherJSONData(address string, value interface{ return nil } -func (*NginxUpstreamCheck) SampleConfig() string { - return sampleConfig -} - -func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error { - if check.client == nil { - client, err := check.createHTTPClient() - - if err != nil { - return err - } - check.client = client - } - - 
statusURL, err := url.Parse(check.URL) - if err != nil { - return err - } - - err = check.gatherStatusData(statusURL.String(), accumulator) - if err != nil { - return err - } - - return nil -} - func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator telegraf.Accumulator) error { - checkData := &NginxUpstreamCheckData{} + checkData := &nginxUpstreamCheckData{} err := check.gatherJSONData(address, checkData) if err != nil { @@ -197,3 +181,19 @@ func (check *NginxUpstreamCheck) getStatusCode(status string) uint8 { return 0 } } + +func newNginxUpstreamCheck() *NginxUpstreamCheck { + return &NginxUpstreamCheck{ + URL: "http://127.0.0.1/status?format=json", + Method: "GET", + Headers: make(map[string]string), + HostHeader: "", + Timeout: config.Duration(time.Second * 5), + } +} + +func init() { + inputs.Add("nginx_upstream_check", func() telegraf.Input { + return newNginxUpstreamCheck() + }) +} diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go index 4cd10020e3ea5..7eb9b065a892d 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -58,7 +58,7 @@ func TestNginxUpstreamCheckData(test *testing.T) { })) defer testServer.Close() - check := NewNginxUpstreamCheck() + check := newNginxUpstreamCheck() check.URL = testServer.URL + "/status" var accumulator testutil.Accumulator @@ -139,7 +139,7 @@ func TestNginxUpstreamCheckRequest(test *testing.T) { })) defer testServer.Close() - check := NewNginxUpstreamCheck() + check := newNginxUpstreamCheck() check.URL = testServer.URL + "/status" check.Headers["X-test"] = "test-value" check.HostHeader = "status.local" diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go index 2dea49d3623bd..68b06edeb1f62 100644 --- a/plugins/inputs/nginx_vts/nginx_vts.go +++ 
b/plugins/inputs/nginx_vts/nginx_vts.go @@ -106,7 +106,7 @@ func (n *NginxVTS) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } } -type NginxVTSResponse struct { +type nginxVTSResponse struct { Connections struct { Active uint64 `json:"active"` Reading uint64 `json:"reading"` @@ -116,13 +116,13 @@ type NginxVTSResponse struct { Handled uint64 `json:"handled"` Requests uint64 `json:"requests"` } `json:"connections"` - ServerZones map[string]Server `json:"serverZones"` - FilterZones map[string]map[string]Server `json:"filterZones"` - UpstreamZones map[string][]Upstream `json:"upstreamZones"` - CacheZones map[string]Cache `json:"cacheZones"` + ServerZones map[string]server `json:"serverZones"` + FilterZones map[string]map[string]server `json:"filterZones"` + UpstreamZones map[string][]upstream `json:"upstreamZones"` + CacheZones map[string]cache `json:"cacheZones"` } -type Server struct { +type server struct { RequestCounter uint64 `json:"requestCounter"` InBytes uint64 `json:"inBytes"` OutBytes uint64 `json:"outBytes"` @@ -144,7 +144,7 @@ type Server struct { } `json:"responses"` } -type Upstream struct { +type upstream struct { Server string `json:"server"` RequestCounter uint64 `json:"requestCounter"` InBytes uint64 `json:"inBytes"` @@ -165,7 +165,7 @@ type Upstream struct { Down bool `json:"down"` } -type Cache struct { +type cache struct { MaxSize uint64 `json:"maxSize"` UsedSize uint64 `json:"usedSize"` InBytes uint64 `json:"inBytes"` @@ -184,7 +184,7 @@ type Cache struct { func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { dec := json.NewDecoder(r) - status := &NginxVTSResponse{} + status := &nginxVTSResponse{} if err := dec.Decode(status); err != nil { return errors.New("error while decoding JSON response") } diff --git a/plugins/inputs/nomad/nomad.go b/plugins/inputs/nomad/nomad.go index 85a9d9636e98b..b297feb235d10 100644 --- a/plugins/inputs/nomad/nomad.go +++ b/plugins/inputs/nomad/nomad.go @@ -18,27 
+18,16 @@ import ( //go:embed sample.conf var sampleConfig string -// Nomad configuration object -type Nomad struct { - URL string `toml:"url"` +const timeLayout = "2006-01-02 15:04:05 -0700 MST" +type Nomad struct { + URL string `toml:"url"` ResponseTimeout config.Duration `toml:"response_timeout"` - tls.ClientConfig roundTripper http.RoundTripper } -const timeLayout = "2006-01-02 15:04:05 -0700 MST" - -func init() { - inputs.Add("nomad", func() telegraf.Input { - return &Nomad{ - ResponseTimeout: config.Duration(5 * time.Second), - } - }) -} - func (*Nomad) SampleConfig() string { return sampleConfig } @@ -161,3 +150,11 @@ func buildNomadMetrics(acc telegraf.Accumulator, summaryMetrics *metricsSummary) return nil } + +func init() { + inputs.Add("nomad", func() telegraf.Input { + return &Nomad{ + ResponseTimeout: config.Duration(5 * time.Second), + } + }) +} diff --git a/plugins/inputs/nomad/nomad_metrics.go b/plugins/inputs/nomad/nomad_metrics.go index 8c2d4a1e9eefa..d4eece9bda1e1 100644 --- a/plugins/inputs/nomad/nomad_metrics.go +++ b/plugins/inputs/nomad/nomad_metrics.go @@ -37,6 +37,7 @@ type sampledValue struct { DisplayLabels map[string]string `json:"Labels"` } +// AggregateSample needs to be exported, because JSON decode cannot set embedded pointer to unexported struct type AggregateSample struct { Count int `json:"count"` Rate float64 `json:"rate"` diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index deac3855aa0e7..4704aacd97116 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -21,61 +21,27 @@ import ( //go:embed sample.conf var sampleConfig string -type runner func(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) +var ( + defaultBinary = "/usr/sbin/nsd-control" + defaultTimeout = config.Duration(time.Second) +) -// NSD is used to store configuration values type NSD struct { - Binary string - Timeout config.Duration - UseSudo bool - Server string - 
ConfigFile string + Binary string `toml:"binary"` + Timeout config.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` + Server string `toml:"server"` + ConfigFile string `toml:"config_file"` run runner } -var defaultBinary = "/usr/sbin/nsd-control" -var defaultTimeout = config.Duration(time.Second) - -// Shell out to nsd_stat and return the output -func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server, configFile string) (*bytes.Buffer, error) { - cmdArgs := []string{"stats_noreset"} - - if server != "" { - host, port, err := net.SplitHostPort(server) - if err == nil { - server = host + "@" + port - } - - cmdArgs = append([]string{"-s", server}, cmdArgs...) - } - - if configFile != "" { - cmdArgs = append([]string{"-c", configFile}, cmdArgs...) - } - - cmd := exec.Command(cmdName, cmdArgs...) - - if useSudo { - cmdArgs = append([]string{cmdName}, cmdArgs...) - cmd = exec.Command("sudo", cmdArgs...) - } - - var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Duration(timeout)) - if err != nil { - return &out, fmt.Errorf("error running nsd-control: %w (%s %v)", err, cmdName, cmdArgs) - } - - return &out, nil -} +type runner func(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) func (*NSD) SampleConfig() string { return sampleConfig } -// Gather collects stats from nsd-control and adds them to the Accumulator func (s *NSD) Gather(acc telegraf.Accumulator) error { out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ConfigFile) if err != nil { @@ -133,6 +99,40 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { return nil } +// Shell out to nsd_stat and return the output +func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server, configFile string) (*bytes.Buffer, error) { + cmdArgs := []string{"stats_noreset"} + + if server != "" { + host, port, err := net.SplitHostPort(server) + if err == nil { + server = host + 
"@" + port + } + + cmdArgs = append([]string{"-s", server}, cmdArgs...) + } + + if configFile != "" { + cmdArgs = append([]string{"-c", configFile}, cmdArgs...) + } + + cmd := exec.Command(cmdName, cmdArgs...) + + if useSudo { + cmdArgs = append([]string{cmdName}, cmdArgs...) + cmd = exec.Command("sudo", cmdArgs...) + } + + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return &out, fmt.Errorf("error running nsd-control: %w (%s %v)", err, cmdName, cmdArgs) + } + + return &out, nil +} + func init() { inputs.Add("nsd", func() telegraf.Input { return &NSD{ diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index dad4c0e925ff0..8aba9e8852946 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func NSDControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { +func nsdControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBufferString(output), nil } @@ -19,7 +19,7 @@ func NSDControl(output string) func(string, config.Duration, bool, string, strin func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &NSD{ - run: NSDControl(fullOutput), + run: nsdControl(fullOutput), } err := v.Gather(acc) diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index b124d28ee3707..b9c3b4b88053a 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -42,25 +42,15 @@ import ( //go:embed sample.conf var sampleConfig string -// Might add Lookupd endpoints for cluster discovery -type NSQ struct { - Endpoints []string - tls.ClientConfig - httpClient *http.Client -} - const ( requestPattern = `%s/stats?format=json` ) -func init() { - inputs.Add("nsq", func() 
telegraf.Input { - return New() - }) -} +type NSQ struct { + Endpoints []string `toml:"endpoints"` -func New() *NSQ { - return &NSQ{} + tls.ClientConfig + httpClient *http.Client } func (*NSQ) SampleConfig() string { @@ -305,3 +295,13 @@ type clientStats struct { TLSNegotiatedProtocol string `json:"tls_negotiated_protocol"` TLSNegotiatedProtocolIsMutual bool `json:"tls_negotiated_protocol_is_mutual"` } + +func newNSQ() *NSQ { + return &NSQ{} +} + +func init() { + inputs.Add("nsq", func() telegraf.Input { + return newNSQ() + }) +} diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index b2887c2b62edb..713585208fec0 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -23,7 +23,7 @@ func TestNSQStatsV1(t *testing.T) { })) defer ts.Close() - n := New() + n := newNSQ() n.Endpoints = []string{ts.URL} var acc testutil.Accumulator @@ -283,7 +283,7 @@ func TestNSQStatsPreV1(t *testing.T) { })) defer ts.Close() - n := New() + n := newNSQ() n.Endpoints = []string{ts.URL} var acc testutil.Accumulator diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 6117558874559..69f2a0aea73a1 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -20,40 +20,39 @@ const ( defaultMaxUndeliveredMessages = 1000 ) -type empty struct{} -type semaphore chan empty - -type logger struct { - log telegraf.Logger -} - -func (l *logger) Output(_ int, s string) error { - l.log.Debug(s) - return nil -} - -// NSQConsumer represents the configuration of the plugin type NSQConsumer struct { - Server string `toml:"server" deprecated:"1.5.0;1.35.0;use 'nsqd' instead"` - Nsqd []string `toml:"nsqd"` - Nsqlookupd []string `toml:"nsqlookupd"` - Topic string `toml:"topic"` - Channel string `toml:"channel"` - MaxInFlight int `toml:"max_in_flight"` - - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Server string `toml:"server" 
deprecated:"1.5.0;1.35.0;use 'nsqd' instead"` + Nsqd []string `toml:"nsqd"` + Nsqlookupd []string `toml:"nsqlookupd"` + Topic string `toml:"topic"` + Channel string `toml:"channel"` + MaxInFlight int `toml:"max_in_flight"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Log telegraf.Logger `toml:"-"` parser telegraf.Parser consumer *nsq.Consumer - Log telegraf.Logger - mu sync.Mutex messages map[telegraf.TrackingID]*nsq.Message wg sync.WaitGroup cancel context.CancelFunc } +type ( + empty struct{} + semaphore chan empty +) + +type logger struct { + log telegraf.Logger +} + +func (l *logger) Output(_ int, s string) error { + l.log.Debug(s) + return nil +} + func (*NSQConsumer) SampleConfig() string { return sampleConfig } @@ -77,7 +76,6 @@ func (n *NSQConsumer) SetParser(parser telegraf.Parser) { n.parser = parser } -// Start pulls data from nsq func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { acc := ac.WithTracking(n.MaxUndeliveredMessages) sem := make(semaphore, n.MaxUndeliveredMessages) @@ -140,6 +138,17 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { return nil } +func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (n *NSQConsumer) Stop() { + n.cancel() + n.wg.Wait() + n.consumer.Stop() + <-n.consumer.StopChan +} + func (n *NSQConsumer) onDelivery(ctx context.Context, acc telegraf.TrackingAccumulator, sem semaphore) { for { select { @@ -165,19 +174,6 @@ func (n *NSQConsumer) onDelivery(ctx context.Context, acc telegraf.TrackingAccum } } -// Stop processing messages -func (n *NSQConsumer) Stop() { - n.cancel() - n.wg.Wait() - n.consumer.Stop() - <-n.consumer.StopChan -} - -// Gather is a noop -func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { - return nil -} - func (n *NSQConsumer) connect() error { if n.consumer == nil { config := nsq.NewConfig() diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 7f517f7b55506..b6b149c867918 100644 --- 
a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -20,20 +20,18 @@ var ( colonByte = []byte(":") ) -// default file paths const ( - NetNetstat = "/net/netstat" - NetSnmp = "/net/snmp" - NetSnmp6 = "/net/snmp6" - NetProc = "/proc" -) - -// env variable names -const ( - EnvNetstat = "PROC_NET_NETSTAT" - EnvSnmp = "PROC_NET_SNMP" - EnvSnmp6 = "PROC_NET_SNMP6" - EnvRoot = "PROC_ROOT" + // default file paths + netNetstat = "/net/netstat" + netSnmp = "/net/snmp" + netSnmp6 = "/net/snmp6" + netProc = "/proc" + + // env variable names + envNetstat = "PROC_NET_NETSTAT" + envSnmp = "PROC_NET_SNMP" + envSnmp6 = "PROC_NET_SNMP6" + envRoot = "PROC_ROOT" ) type Nstat struct { @@ -104,13 +102,13 @@ func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) { // if it is empty then try read from env variables func (ns *Nstat) loadPaths() { if ns.ProcNetNetstat == "" { - ns.ProcNetNetstat = proc(EnvNetstat, NetNetstat) + ns.ProcNetNetstat = proc(envNetstat, netNetstat) } if ns.ProcNetSNMP == "" { - ns.ProcNetSNMP = proc(EnvSnmp, NetSnmp) + ns.ProcNetSNMP = proc(envSnmp, netSnmp) } if ns.ProcNetSNMP6 == "" { - ns.ProcNetSNMP6 = proc(EnvSnmp6, NetSnmp6) + ns.ProcNetSNMP6 = proc(envSnmp6, netSnmp6) } } @@ -188,9 +186,9 @@ func proc(env, path string) string { return p } // try to read root path, or use default root path - root := os.Getenv(EnvRoot) + root := os.Getenv(envRoot) if root == "" { - root = NetProc + root = netProc } return root + path } diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index fc5254f18bf70..e24b1f1a94f3d 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -30,16 +30,25 @@ var reBrackets = regexp.MustCompile(`\s+\([\S]*`) type elementType int64 const ( - None elementType = iota - Tag - FieldFloat - FieldDuration - FieldIntDecimal - FieldIntOctal - FieldIntRatio8 - FieldIntBits + none elementType = iota + tag + fieldFloat + fieldDuration + fieldIntDecimal + fieldIntOctal + 
fieldIntRatio8 + fieldIntBits ) +type NTPQ struct { + DNSLookup bool `toml:"dns_lookup" deprecated:"1.24.0;1.35.0;add '-n' to 'options' instead to skip DNS lookup"` + Options string `toml:"options"` + Servers []string `toml:"servers"` + ReachFormat string `toml:"reach_format"` + + runQ func(string) ([]byte, error) +} + type column struct { name string etype elementType @@ -55,21 +64,12 @@ var tagHeaders = map[string]string{ // Mapping of fields var fieldElements = map[string]elementType{ - "delay": FieldFloat, - "jitter": FieldFloat, - "offset": FieldFloat, - "reach": FieldIntDecimal, - "poll": FieldDuration, - "when": FieldDuration, -} - -type NTPQ struct { - DNSLookup bool `toml:"dns_lookup" deprecated:"1.24.0;1.35.0;add '-n' to 'options' instead to skip DNS lookup"` - Options string `toml:"options"` - Servers []string `toml:"servers"` - ReachFormat string `toml:"reach_format"` - - runQ func(string) ([]byte, error) + "delay": fieldFloat, + "jitter": fieldFloat, + "offset": fieldFloat, + "reach": fieldIntDecimal, + "poll": fieldDuration, + "when": fieldDuration, } func (*NTPQ) SampleConfig() string { @@ -117,19 +117,19 @@ func (n *NTPQ) Init() error { n.ReachFormat = "octal" // Interpret the field as decimal integer returning // the raw (octal) representation - fieldElements["reach"] = FieldIntDecimal + fieldElements["reach"] = fieldIntDecimal case "decimal": // Interpret the field as octal integer returning // decimal number representation - fieldElements["reach"] = FieldIntOctal + fieldElements["reach"] = fieldIntOctal case "count": // Interpret the field as bits set returning // the number of bits set - fieldElements["reach"] = FieldIntBits + fieldElements["reach"] = fieldIntBits case "ratio": // Interpret the field as ratio between the number of // bits set and the maximum available bits set (8). 
- fieldElements["reach"] = FieldIntRatio8 + fieldElements["reach"] = fieldIntRatio8 default: return fmt.Errorf("unknown 'reach_format' %q", n.ReachFormat) } @@ -176,7 +176,7 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { if name, isTag := tagHeaders[el]; isTag { columns = append(columns, column{ name: name, - etype: Tag, + etype: tag, }) continue } @@ -191,7 +191,7 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { } // Skip the column if not found - columns = append(columns, column{etype: None}) + columns = append(columns, column{etype: none}) } break } @@ -221,11 +221,11 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { col := columns[i] switch col.etype { - case None: + case none: continue - case Tag: + case tag: tags[col.name] = raw - case FieldFloat: + case fieldFloat: value, err := strconv.ParseFloat(raw, 64) if err != nil { msg := fmt.Sprintf("%sparsing %q (%v) as float failed", msgPrefix, col.name, raw) @@ -233,7 +233,7 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { continue } fields[col.name] = value - case FieldDuration: + case fieldDuration: // Ignore fields only containing a minus if raw == "-" { continue @@ -257,28 +257,28 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { continue } fields[col.name] = value * factor - case FieldIntDecimal: + case fieldIntDecimal: value, err := strconv.ParseInt(raw, 10, 64) if err != nil { acc.AddError(fmt.Errorf("parsing %q (%v) as int failed: %w", col.name, raw, err)) continue } fields[col.name] = value - case FieldIntOctal: + case fieldIntOctal: value, err := strconv.ParseInt(raw, 8, 64) if err != nil { acc.AddError(fmt.Errorf("parsing %q (%v) as int failed: %w", col.name, raw, err)) continue } fields[col.name] = value - case FieldIntBits: + case fieldIntBits: value, err := strconv.ParseUint(raw, 8, 64) if err != nil { acc.AddError(fmt.Errorf("parsing %q (%v) as int failed: %w", col.name, raw, 
err)) continue } fields[col.name] = bits.OnesCount64(value) - case FieldIntRatio8: + case fieldIntRatio8: value, err := strconv.ParseUint(raw, 8, 64) if err != nil { acc.AddError(fmt.Errorf("parsing %q (%v) as int failed: %w", col.name, raw, err)) diff --git a/plugins/inputs/nvidia_smi/common/setters.go b/plugins/inputs/nvidia_smi/common/setters.go index 4c5e0772578c4..c74cbfe246fbb 100644 --- a/plugins/inputs/nvidia_smi/common/setters.go +++ b/plugins/inputs/nvidia_smi/common/setters.go @@ -5,12 +5,14 @@ import ( "strings" ) +// SetTagIfUsed sets those tags whose value is different from empty string. func SetTagIfUsed(m map[string]string, k, v string) { if v != "" { m[k] = v } } +// SetIfUsed sets those fields whose value is different from empty string. func SetIfUsed(t string, m map[string]interface{}, k, v string) { vals := strings.Fields(v) if len(vals) < 1 { diff --git a/plugins/inputs/nvidia_smi/schema_v11/parser.go b/plugins/inputs/nvidia_smi/schema_v11/parser.go index ae45117d6008d..05864790c4808 100644 --- a/plugins/inputs/nvidia_smi/schema_v11/parser.go +++ b/plugins/inputs/nvidia_smi/schema_v11/parser.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi/common" ) +// Parse parses the XML-encoded data from nvidia-smi and adds measurements. func Parse(acc telegraf.Accumulator, buf []byte) error { var s smi if err := xml.Unmarshal(buf, &s); err != nil { diff --git a/plugins/inputs/nvidia_smi/schema_v12/parser.go b/plugins/inputs/nvidia_smi/schema_v12/parser.go index 4bc88bd017e70..bafd884017596 100644 --- a/plugins/inputs/nvidia_smi/schema_v12/parser.go +++ b/plugins/inputs/nvidia_smi/schema_v12/parser.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi/common" ) +// Parse parses the XML-encoded data from nvidia-smi and adds measurements. 
func Parse(acc telegraf.Accumulator, buf []byte) error { var s smi if err := xml.Unmarshal(buf, &s); err != nil { From 16401c73cbb7b13cd55b0f3bd807179c7bc22154 Mon Sep 17 00:00:00 2001 From: Mingyang Zheng Date: Mon, 25 Nov 2024 02:26:09 -0800 Subject: [PATCH 026/170] fix(logging): Add Close() func for redirectLogger (#16219) --- logger/handler.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/logger/handler.go b/logger/handler.go index 7f23b32b0a6b6..6bc066658d183 100644 --- a/logger/handler.go +++ b/logger/handler.go @@ -125,3 +125,13 @@ func (l *redirectLogger) Print(level telegraf.LogLevel, ts time.Time, prefix str msg := append([]interface{}{ts.In(time.UTC).Format(time.RFC3339), " ", level.Indicator(), " ", prefix + attrMsg}, args...) fmt.Fprintln(l.writer, msg...) } + +func (l *redirectLogger) Close() error { + if l.writer == os.Stderr { + return nil + } + if closer, ok := l.writer.(io.Closer); ok { + return closer.Close() + } + return nil +} From 26366121a8f3d34b917996f6f66bf22da709b2e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:46:39 +0100 Subject: [PATCH 027/170] chore(deps): Bump github.com/vishvananda/netns from 0.0.4 to 0.0.5 (#16199) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index c73d28d3afbeb..352c32787ffa2 100644 --- a/go.mod +++ b/go.mod @@ -198,7 +198,7 @@ require ( github.com/urfave/cli/v2 v2.27.2 github.com/vapourismo/knx-go v0.0.0-20240217175130-922a0d50c241 github.com/vishvananda/netlink v1.3.0 - github.com/vishvananda/netns v0.0.4 + github.com/vishvananda/netns v0.0.5 github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.45.1 github.com/wavefronthq/wavefront-sdk-go v0.15.0 diff --git a/go.sum b/go.sum index e2e73023c90e4..ea543f920d23c 100644 --- a/go.sum +++ b/go.sum @@ -2353,8 +2353,9 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp 
github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmware/govmomi v0.45.1 h1:pmMmSUNIw/kePaCRFaUOpDh7IxDfhDi9M4Qh+DRlBV4= From a5dc7aa6b2515fdf82316647e5937996f4e1c8ef Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Fri, 29 Nov 2024 17:02:13 +0800 Subject: [PATCH 028/170] Update plugins/outputs/inlong/README.md Co-authored-by: Charles Zhang --- plugins/outputs/inlong/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index 24ce988111af6..46708ecbbdda8 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -1,6 +1,6 @@ # Inlong Output Plugin -This plugin writes telegraf metrics to Inlong +This plugin writes telegraf metrics to [Apache InLong](https://inlong.apache.org/docs/next/introduction). 
## Global configuration options From 69af04c97b300244ad551d61ce392b4068c65eb6 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Fri, 29 Nov 2024 17:09:11 +0800 Subject: [PATCH 029/170] feat(outputs): Fix line too long error --- plugins/outputs/inlong/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index 46708ecbbdda8..2e578dcae3dc1 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -1,6 +1,7 @@ # Inlong Output Plugin -This plugin writes telegraf metrics to [Apache InLong](https://inlong.apache.org/docs/next/introduction). +This plugin writes telegraf metrics to +[Apache InLong](https://inlong.apache.org/docs/next/introduction). ## Global configuration options From 3ed4b799b69828daaf61dc5d32da591cd8c28e0e Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Fri, 29 Nov 2024 17:15:38 +0800 Subject: [PATCH 030/170] feat(outputs): Fix lint code error --- plugins/outputs/inlong/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index 2e578dcae3dc1..425c173ba41b1 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -2,7 +2,6 @@ This plugin writes telegraf metrics to [Apache InLong](https://inlong.apache.org/docs/next/introduction). 
- ## Global configuration options In addition to the plugin-specific configuration settings, plugins support From d6491171cd5e601fa3d2ddc108640e3dd4373659 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Fri, 29 Nov 2024 20:03:47 +0800 Subject: [PATCH 031/170] feat(outputs): Trigger PR rerun --- plugins/outputs/inlong/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index 425c173ba41b1..2e578dcae3dc1 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -2,6 +2,7 @@ This plugin writes telegraf metrics to [Apache InLong](https://inlong.apache.org/docs/next/introduction). + ## Global configuration options In addition to the plugin-specific configuration settings, plugins support From d4f3e7d08ac7c0a116f6d862fbcce1932b062417 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Sat, 30 Nov 2024 01:08:30 +0800 Subject: [PATCH 032/170] Update README.md --- plugins/outputs/inlong/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index 2e578dcae3dc1..425c173ba41b1 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -2,7 +2,6 @@ This plugin writes telegraf metrics to [Apache InLong](https://inlong.apache.org/docs/next/introduction). 
- ## Global configuration options In addition to the plugin-specific configuration settings, plugins support From 8902a401e6e18791489688329ca141ebdbd89b88 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Sat, 30 Nov 2024 12:00:27 +0800 Subject: [PATCH 033/170] Update README.md --- plugins/outputs/inlong/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index 425c173ba41b1..a782875d2fda6 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -1,7 +1,8 @@ # Inlong Output Plugin -This plugin writes telegraf metrics to +This plugin writes telegraf metrics to [Apache InLong](https://inlong.apache.org/docs/next/introduction). + ## Global configuration options In addition to the plugin-specific configuration settings, plugins support From 55d498e2bac26ccec344cbb5aa2d2a129fe46edf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:07:29 -0600 Subject: [PATCH 034/170] chore(deps): Bump golang.org/x/net from 0.30.0 to 0.31.0 (#16236) --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 352c32787ffa2..0071bec366894 100644 --- a/go.mod +++ b/go.mod @@ -212,13 +212,13 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 go.starlark.net v0.0.0-20240925182052-1207426daebd go.step.sm/crypto v0.54.0 - golang.org/x/crypto v0.28.0 + golang.org/x/crypto v0.29.0 golang.org/x/mod v0.21.0 - golang.org/x/net v0.30.0 + golang.org/x/net v0.31.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.9.0 - golang.org/x/sys v0.26.0 - golang.org/x/term v0.25.0 + golang.org/x/sys v0.27.0 + golang.org/x/term v0.26.0 golang.org/x/text v0.20.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 gonum.org/v1/gonum v0.15.1 diff --git a/go.sum b/go.sum index ea543f920d23c..4cfdf355ef7c1 100644 --- a/go.sum 
+++ b/go.sum @@ -2523,8 +2523,8 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2677,8 +2677,8 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ 
-2864,8 +2864,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2883,8 +2883,8 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 1d078d966dc83f2225dcb028c56a8d89d3db31e4 Mon Sep 17 00:00:00 
2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:07:53 -0600 Subject: [PATCH 035/170] chore(deps): Bump cloud.google.com/go/bigquery from 1.63.1 to 1.64.0 (#16232) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0071bec366894..959adc6e8dfa6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/influxdata/telegraf go 1.23.0 require ( - cloud.google.com/go/bigquery v1.63.1 + cloud.google.com/go/bigquery v1.64.0 cloud.google.com/go/monitoring v1.21.1 cloud.google.com/go/pubsub v1.45.1 cloud.google.com/go/storage v1.43.0 diff --git a/go.sum b/go.sum index 4cfdf355ef7c1..07ac3633455e5 100644 --- a/go.sum +++ b/go.sum @@ -131,8 +131,8 @@ cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/Zur cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/bigquery v1.63.1 h1:/6syiWrSpardKNxdvldS5CUTRJX1iIkSPXCjLjiGL+g= -cloud.google.com/go/bigquery v1.63.1/go.mod h1:ufaITfroCk17WTqBhMpi8CRjsfHjMX07pDrQaRKKX2o= +cloud.google.com/go/bigquery v1.64.0 h1:vSSZisNyhr2ioJE1OuYBQrnrpB7pIhRQm4jfjc7E/js= +cloud.google.com/go/bigquery v1.64.0/go.mod h1:gy8Ooz6HF7QmA+TRtX8tZmXBKH5mCFBwUApGAb3zI7Y= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= From 3349d603b77e130d0dba4d09f95c7afac665ceac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:08:23 -0600 Subject: [PATCH 036/170] chore(deps): Bump 
google.golang.org/grpc from 1.67.1 to 1.68.0 (#16233) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 959adc6e8dfa6..42709f4115599 100644 --- a/go.mod +++ b/go.mod @@ -224,7 +224,7 @@ require ( gonum.org/v1/gonum v0.15.1 google.golang.org/api v0.203.0 google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 - google.golang.org/grpc v1.67.1 + google.golang.org/grpc v1.68.0 google.golang.org/protobuf v1.35.1 gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/olivere/elastic.v5 v5.0.86 diff --git a/go.sum b/go.sum index 07ac3633455e5..e256e1123f6f6 100644 --- a/go.sum +++ b/go.sum @@ -3269,8 +3269,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From c5061de0d31f7c6c786ef722b7ccad6dcd63f197 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:09:00 -0600 Subject: [PATCH 037/170] chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.29.3 to 
1.32.6 (#16234) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 42709f4115599..4f15e93118e2d 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.36.2 github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1 - github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.6 github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4 github.com/aws/smithy-go v1.22.1 @@ -277,7 +277,7 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/awnumar/memcall v0.3.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect diff --git a/go.sum b/go.sum index e256e1123f6f6..2200a9f3756ef 100644 --- a/go.sum +++ b/go.sum @@ -860,8 +860,8 @@ github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJ github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.32.5 h1:U8vdWJuY7ruAkzaOdD7guwJjD06YSKmnKCJs7s3IkIo= github.com/aws/aws-sdk-go-v2 v1.32.5/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 
h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= github.com/aws/aws-sdk-go-v2/config v1.27.39 h1:FCylu78eTGzW1ynHcongXK9YHtoXD5AiiUqq3YfJYjU= @@ -919,8 +919,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4/go.mod h1:4G github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 h1:ktR7RUdUQ8m9rkgCPRsS7iTJgFp9MXEX0nltrT8bxY4= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3/go.mod h1:hufTMUGSlcBLGgs6leSPbDfY1sM3mrO2qjtVkPMTDhE= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.6 h1:yN7WEx9ksiP5+9zdKtoQYrUT51HvYw+EA1TXsElvMyk= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.6/go.mod h1:j8MNat6qtGw5OoEACRbWtT8r5my4nRWfM/6Uk+NsuC4= github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 h1:hT8ZAZRIfqBqHbzKTII+CIiY8G2oC9OpLedkZ51DWl8= github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= From d062a5df84e9377221922fb4f4a4d1fb6d43c22f Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 3 Dec 2024 18:10:03 +0100 Subject: [PATCH 038/170] chore(actions): Only check PR title for semantic commit message (#16253) --- .github/workflows/semantic.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml index 7dc9f439d0460..ea869d99b4c54 100644 --- a/.github/workflows/semantic.yml +++ b/.github/workflows/semantic.yml @@ -11,5 +11,5 @@ jobs: semantic: uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main with: - CHECK_PR_TITLE_OR_ONE_COMMIT: true + COMMITS_HISTORY: 0 From f26decbea668d995e956d5c960ba5f76c68c48ae Mon Sep 17 00:00:00 2001 From: Baker X <40594937+Benxiaohai001@users.noreply.github.com> Date: Wed, 4 Dec 2024 01:10:36 +0800 Subject: [PATCH 039/170] docs(serializers.json): Fix typo (#16245) --- plugins/serializers/json/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/serializers/json/README.md b/plugins/serializers/json/README.md index 78b9ee63a332c..edf50ee8bb45f 100644 --- a/plugins/serializers/json/README.md +++ b/plugins/serializers/json/README.md @@ -102,7 +102,7 @@ reference the documentation for the specific plugin. ## Transformations Transformations using the [JSONata standard](https://jsonata.org/) can be specified with -the `json_tansformation` parameter. The input to the transformation is the serialized +the `json_transformation` parameter. The input to the transformation is the serialized metric in the standard-form above. **Note**: There is a difference in batch and non-batch serialization mode! 
From 9cff0ceebe4f81b70d2fa7af662265fc5c73f6d2 Mon Sep 17 00:00:00 2001 From: Jose Luis Gonzalez Calvo <90149790+joseluisgonzalezca@users.noreply.github.com> Date: Tue, 3 Dec 2024 18:12:38 +0100 Subject: [PATCH 040/170] fix(inputs.netflow): Decode flags in TCP and IP headers correctly (#16248) Co-authored-by: jlgonzalez --- plugins/inputs/netflow/sflow_v5.go | 33 ++++++++++++++++++------------ 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/netflow/sflow_v5.go b/plugins/inputs/netflow/sflow_v5.go index 6e43680f3a597..7ac616bf54ebd 100644 --- a/plugins/inputs/netflow/sflow_v5.go +++ b/plugins/inputs/netflow/sflow_v5.go @@ -391,12 +391,13 @@ func (d *sflowv5Decoder) decodeRawHeaderSample(record *sflow.SampledHeader) (map fields["dst"] = l.DstIP.String() flags := []byte("........") - switch { - case l.Flags&layers.IPv4EvilBit > 0: + if l.Flags&layers.IPv4EvilBit > 0 { flags[7] = byte('E') - case l.Flags&layers.IPv4DontFragment > 0: + } + if l.Flags&layers.IPv4DontFragment > 0 { flags[6] = byte('D') - case l.Flags&layers.IPv4MoreFragments > 0: + } + if l.Flags&layers.IPv4MoreFragments > 0 { flags[5] = byte('M') } fields["fragment_flags"] = string(flags) @@ -418,22 +419,28 @@ func (d *sflowv5Decoder) decodeRawHeaderSample(record *sflow.SampledHeader) (map fields["tcp_window_size"] = l.Window fields["tcp_urgent_ptr"] = l.Urgent flags := []byte("........") - switch { - case l.FIN: + if l.FIN { flags[7] = byte('F') - case l.SYN: + } + if l.SYN { flags[6] = byte('S') - case l.RST: + } + if l.RST { flags[5] = byte('R') - case l.PSH: + } + if l.PSH { flags[4] = byte('P') - case l.ACK: + } + if l.ACK { flags[3] = byte('A') - case l.URG: + } + if l.URG { flags[2] = byte('U') - case l.ECE: + } + if l.ECE { flags[1] = byte('E') - case l.CWR: + } + if l.CWR { flags[0] = byte('C') } fields["tcp_flags"] = string(flags) From e8b84ce6d96022c3697a6c47f72526881f963797 Mon Sep 17 00:00:00 2001 From: Long FlyBridge Date: Wed, 4 Dec 2024 01:13:13 +0800 Subject: 
[PATCH 041/170] chore: Fix function names in comments (#16231) Signed-off-by: longxiangqiao --- agent/agent.go | 2 +- config/types.go | 2 +- plugins/inputs/ipmi_sensor/ipmi_sensor_test.go | 4 ++-- plugins/inputs/sensors/sensors_test.go | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 6ebc4fcdc6a1a..7f00fc6ca9ff5 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -811,7 +811,7 @@ func (a *Agent) startOutputs( return src, unit, nil } -// connectOutputs connects to all outputs. +// connectOutput connects to all outputs. func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) error { log.Printf("D! [agent] Attempting connection to [%s]", output.LogName()) if err := output.Connect(); err != nil { diff --git a/config/types.go b/config/types.go index 4a8a2822a50e5..f6a7c00ea5130 100644 --- a/config/types.go +++ b/config/types.go @@ -19,7 +19,7 @@ type Duration time.Duration // Size is an int64 type Size int64 -// UnmarshalTOML parses the duration from the TOML config file +// UnmarshalText parses the duration from the Text config file func (d *Duration) UnmarshalText(b []byte) error { // convert to string durStr := string(b) diff --git a/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go index 25b4576b64bbd..74dab5eb18b8f 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go @@ -214,7 +214,7 @@ func TestGather(t *testing.T) { } } -// fackeExecCommand is a helper function that mock +// fakeExecCommand is a helper function that mock // the exec.Command call (and call the test binary) func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} @@ -536,7 +536,7 @@ func TestGatherV2(t *testing.T) { } } -// fackeExecCommandV2 is a helper function that mock +// fakeExecCommandV2 is a helper function that mock // the exec.Command 
call (and call the test binary) func fakeExecCommandV2(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcessV2", "--", command} diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 47b8e8cbbce70..2a94cea3aa38a 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -290,7 +290,7 @@ func TestGatherNotRemoveNumbers(t *testing.T) { } } -// fackeExecCommand is a helper function that mock +// fakeExecCommand is a helper function that mock // the exec.Command call (and call the test binary) func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} From e4b2e927d70cbbbb28e8ca9d41abc51b538e0b57 Mon Sep 17 00:00:00 2001 From: stackcoder Date: Tue, 3 Dec 2024 18:14:02 +0100 Subject: [PATCH 042/170] feat(inputs.smart): Add Power on Hours and Cycle Count (#16230) --- plugins/inputs/smart/smart.go | 2 ++ plugins/inputs/smart/smart_test.go | 14 ++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index dec5a45bc6288..e704971d43b76 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -88,6 +88,8 @@ var ( "1": "read_error_rate", "5": "reallocated_sectors_count", "7": "seek_error_rate", + "9": "power_on_hours", + "12": "power_cycle_count", "10": "spin_retry_count", "184": "end_to_end_error", "187": "uncorrectable_errors", diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index 449460838bb56..884435d3ccd90 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -53,7 +53,7 @@ func TestGatherAttributes(t *testing.T) { err := s.Gather(&acc) require.NoError(t, err) - require.Equal(t, 68, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 70, acc.NFields(), "Wrong number of fields gathered") for 
_, test := range testsAda0Attributes { acc.AssertContainsTaggedFields(t, "smart_attribute", test.fields, test.tags) @@ -172,7 +172,7 @@ func TestGatherNoAttributes(t *testing.T) { err := s.Gather(&acc) require.NoError(t, err) - require.Equal(t, 11, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 13, acc.NFields(), "Wrong number of fields gathered") acc.AssertDoesNotContainMeasurement(t, "smart_attribute") for _, test := range testsAda0Device { @@ -213,7 +213,7 @@ func TestGatherSATAInfo(t *testing.T) { wg.Add(1) sampleSmart.gatherDisk(acc, "", wg) - require.Equal(t, 106, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 108, acc.NFields(), "Wrong number of fields gathered") require.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -229,7 +229,7 @@ func TestGatherSATAInfo65(t *testing.T) { wg.Add(1) sampleSmart.gatherDisk(acc, "", wg) - require.Equal(t, 96, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 98, acc.NFields(), "Wrong number of fields gathered") require.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -294,7 +294,7 @@ func TestGatherSSD(t *testing.T) { wg.Add(1) sampleSmart.gatherDisk(acc, "", wg) - require.Equal(t, 110, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 112, acc.NFields(), "Wrong number of fields gathered") require.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -310,7 +310,7 @@ func TestGatherSSDRaid(t *testing.T) { wg.Add(1) sampleSmart.gatherDisk(acc, "", wg) - require.Equal(t, 77, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 79, acc.NFields(), "Wrong number of fields gathered") require.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -1492,6 +1492,8 @@ var ( "wear_leveling_count": int64(185), "pending_sector_count": int64(0), "reallocated_sectors_count": int64(0), + "power_cycle_count": int64(14879), + 
"power_on_hours": int64(2988), }, map[string]string{ "device": "ada0", From 543b907cdf46d555afe083de846dc4ed055a4339 Mon Sep 17 00:00:00 2001 From: Phil Bracikowski <13472206+philjb@users.noreply.github.com> Date: Tue, 3 Dec 2024 09:18:46 -0800 Subject: [PATCH 043/170] fix(inputs.procstat): Handle running processes correctly across multiple filters (#16257) --- plugins/inputs/procstat/procstat.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 6a50cb317ad2b..4e3e4df6d38c0 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -324,7 +324,7 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error { func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { now := time.Now() - + running := make(map[PID]bool) for _, f := range p.Filter { groups, err := f.ApplyFilter() if err != nil { @@ -347,7 +347,6 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { } var count int - running := make(map[PID]bool) for _, g := range groups { count += len(g.processes) for _, gp := range g.processes { @@ -397,13 +396,6 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { } } - // Cleanup processes that are not running anymore - for pid := range p.processes { - if !running[pid] { - delete(p.processes, pid) - } - } - // Add lookup statistics-metric acc.AddFields( "procstat_lookup", @@ -419,6 +411,13 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { now, ) } + + // Cleanup processes that are not running anymore across all filters/groups + for pid := range p.processes { + if !running[pid] { + delete(p.processes, pid) + } + } return nil } From 90c7ea2c9e028d8e3b9a864a80524390823ef795 Mon Sep 17 00:00:00 2001 From: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> Date: Wed, 4 Dec 2024 03:59:21 -0600 Subject: [PATCH 044/170] docs: Fix PostgreSQL example DSN (#16229) --- 
docs/SQL_DRIVERS_INPUT.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md index 54b2519f8f207..458c6a0931a28 100644 --- a/docs/SQL_DRIVERS_INPUT.md +++ b/docs/SQL_DRIVERS_INPUT.md @@ -3,20 +3,20 @@ This is a list of available drivers for the SQL input plugin. The data-source-name (DSN) is driver specific and might change between versions. Please check the driver documentation for available options and the format. -| database | driver | aliases | example DSN | comment | -| -------------------- | --------------------------------------------------------- | --------------- | -------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -| ClickHouse | [clickhouse](https://github.com/ClickHouse/clickhouse-go) | | `tcp://host:port[?param1=value&...¶mN=value]"` | see [clickhouse-go docs](https://github.com/ClickHouse/clickhouse-go#dsn) for more information | -| CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres or pgx | see _postgres_ driver | uses PostgresQL driver | -| FlightSQL | [flightsql](https://github.com/apache/arrow/tree/main/go/arrow/flight/flightsql/driver) | | `flightsql://[username[:password]@]host:port?timeout=10s[&token=TOKEN][¶m1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/apache/arrow/blob/main/go/arrow/flight/flightsql/driver/README.md) for more information | -| IBM Netezza | [nzgo](https://github.com/IBM/nzgo) | |`host=your_nz_host port=5480 user=your_nz_user password=your_nz_password dbname=your_nz_db_name sslmode=disable`| see [driver docs](https://pkg.go.dev/github.com/IBM/nzgo/v12) for more | -| MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | -| Microsoft SQL Server | 
[sqlserver](https://github.com/microsoft/go-mssqldb) | mssql | `sqlserver://username:password@host/instance?param1=value¶m2=value` | uses newer _sqlserver_ driver | -| MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information | -| Oracle | [oracle](https://github.com/sijms/go-ora) | oracle | `oracle://username:password@host:port/service?param1=value¶m2=value` | see [driver docs](https://github.com/sijms/go-ora/blob/master/README.md) for more information | -| PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information | -| SAP HANA | [go-hdb](https://github.com/SAP/go-hdb) | hana | `hdb://user:password@host:port` | see [driver docs](https://github.com/SAP/go-hdb) for more information | -| SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docs](https://pkg.go.dev/modernc.org/sqlite) for more information | -| TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | +| database | driver | aliases | example DSN | comment | +| -------------------- | --------------------------------------------------------- | --------------- |------------------------------------------------------------------------------------------------------------------| --------------------------------------------------------------------------------------------------------------------- | +| ClickHouse | [clickhouse](https://github.com/ClickHouse/clickhouse-go) | | `tcp://host:port[?param1=value&...¶mN=value]"` | see [clickhouse-go docs](https://github.com/ClickHouse/clickhouse-go#dsn) for more information | +| CockroachDB | [cockroach](https://github.com/jackc/pgx) | 
postgres or pgx | see _postgres_ driver | uses PostgresQL driver | +| FlightSQL | [flightsql](https://github.com/apache/arrow/tree/main/go/arrow/flight/flightsql/driver) | | `flightsql://[username[:password]@]host:port?timeout=10s[&token=TOKEN][¶m1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/apache/arrow/blob/main/go/arrow/flight/flightsql/driver/README.md) for more information | +| IBM Netezza | [nzgo](https://github.com/IBM/nzgo) | | `host=your_nz_host port=5480 user=your_nz_user password=your_nz_password dbname=your_nz_db_name sslmode=disable` | see [driver docs](https://pkg.go.dev/github.com/IBM/nzgo/v12) for more | +| MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | +| Microsoft SQL Server | [sqlserver](https://github.com/microsoft/go-mssqldb) | mssql | `sqlserver://username:password@host/instance?param1=value¶m2=value` | uses newer _sqlserver_ driver | +| MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information | +| Oracle | [oracle](https://github.com/sijms/go-ora) | oracle | `oracle://username:password@host:port/service?param1=value¶m2=value` | see [driver docs](https://github.com/sijms/go-ora/blob/master/README.md) for more information | +| PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `postgresql://[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information | +| SAP HANA | [go-hdb](https://github.com/SAP/go-hdb) | hana | `hdb://user:password@host:port` | see [driver docs](https://github.com/SAP/go-hdb) for more information | +| SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docs](https://pkg.go.dev/modernc.org/sqlite) for more information 
| +| TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | ## Comments From b12eb5a60eb970739f2caf5e2748fda1ffd44861 Mon Sep 17 00:00:00 2001 From: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> Date: Wed, 4 Dec 2024 13:30:15 -0600 Subject: [PATCH 045/170] feat(inputs.vsphere): Add cpu temperature field (#16109) --- plugins/inputs/vsphere/README.md | 2 ++ plugins/inputs/vsphere/finder.go | 1 + 2 files changed, 3 insertions(+) diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 97184ec0ae09a..3324c088c9264 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -551,6 +551,8 @@ override the default query interval in the vSphere plugin. * Power: energy, usage * Datastore stats: * Disk: Capacity, provisioned, used +* Numeric Sensor stats: + * CPU: temperature For a detailed list of commonly available metrics, please refer to [METRICS.md](METRICS.md) diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 8d58653c1c1d7..529a6ca92d039 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -263,6 +263,7 @@ func init() { "Datastore": {"parent", "info", "customValue"}, "ClusterComputeResource": {"parent", "customValue"}, "Datacenter": {"parent", "customValue"}, + "HostNumericSensorInfo": {"parent", "temperature", "baseUnits"}, } containers = map[string]interface{}{ From 0ea4c1422ef013b39e10061ce96ac4bdb0ccd93e Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 4 Dec 2024 21:55:11 +0100 Subject: [PATCH 046/170] feat(outputs): Implement partial write errors (#16146) --- internal/errors.go | 20 +++ models/buffer.go | 89 ++++++++--- models/buffer_disk.go | 129 +++++++++++----- models/buffer_disk_test.go | 10 +- models/buffer_mem.go | 68 ++++----- models/buffer_mem_test.go | 5 +- models/buffer_suite_test.go | 275 
+++++++++++++++++++++------------- models/running_output.go | 51 ++++--- models/running_output_test.go | 1 + 9 files changed, 426 insertions(+), 222 deletions(-) diff --git a/internal/errors.go b/internal/errors.go index a1f58c3eb2510..d1e098ea441ce 100644 --- a/internal/errors.go +++ b/internal/errors.go @@ -37,3 +37,23 @@ func (e *FatalError) Error() string { func (e *FatalError) Unwrap() error { return e.Err } + +// PartialWriteError indicate that only a subset of the metrics were written +// successfully (i.e. accepted). The rejected metrics should be removed from +// the buffer without being successfully written. Please note: the metrics +// are specified as indices into the batch to be able to reference tracking +// metrics correctly. +type PartialWriteError struct { + Err error + MetricsAccept []int + MetricsReject []int + MetricsRejectErrors []error +} + +func (e *PartialWriteError) Error() string { + return e.Err.Error() +} + +func (e *PartialWriteError) Unwrap() error { + return e.Err +} diff --git a/models/buffer.go b/models/buffer.go index f20e6ee2f9c08..167f56639818f 100644 --- a/models/buffer.go +++ b/models/buffer.go @@ -10,12 +10,57 @@ import ( ) var ( - AgentMetricsWritten = selfstat.Register("agent", "metrics_written", make(map[string]string)) - AgentMetricsDropped = selfstat.Register("agent", "metrics_dropped", make(map[string]string)) + AgentMetricsWritten = selfstat.Register("agent", "metrics_written", make(map[string]string)) + AgentMetricsRejected = selfstat.Register("agent", "metrics_rejected", make(map[string]string)) + AgentMetricsDropped = selfstat.Register("agent", "metrics_dropped", make(map[string]string)) registerGob = sync.OnceFunc(func() { metric.Init() }) ) +type Transaction struct { + // Batch of metrics to write + Batch []telegraf.Metric + + // Accept denotes the indices of metrics that were successfully written + Accept []int + // Reject denotes the indices of metrics that were not written but should + // not be requeued + 
Reject []int + + // Marks this transaction as valid + valid bool + + // Internal state that can be used by the buffer implementation + state interface{} +} + +func (tx *Transaction) AcceptAll() { + tx.Accept = make([]int, len(tx.Batch)) + for i := range tx.Batch { + tx.Accept[i] = i + } +} + +func (tx *Transaction) KeepAll() {} + +func (tx *Transaction) InferKeep() []int { + used := make([]bool, len(tx.Batch)) + for _, idx := range tx.Accept { + used[idx] = true + } + for _, idx := range tx.Reject { + used[idx] = true + } + + keep := make([]int, 0, len(tx.Batch)) + for i := range tx.Batch { + if !used[i] { + keep = append(keep, i) + } + } + return keep +} + type Buffer interface { // Len returns the number of metrics currently in the buffer. Len() int @@ -23,19 +68,15 @@ type Buffer interface { // Add adds metrics to the buffer and returns number of dropped metrics. Add(metrics ...telegraf.Metric) int - // Batch returns a slice containing up to batchSize of the oldest metrics not - // yet dropped. Metrics are ordered from oldest to newest in the batch. The - // batch must not be modified by the client. - Batch(batchSize int) []telegraf.Metric - - // Accept marks the batch, acquired from Batch(), as successfully written. - Accept(metrics []telegraf.Metric) + // Batch starts a transaction by returning a slice of metrics up to the + // given batch-size starting from the oldest metric in the buffer. Metrics + // are ordered from oldest to newest and must not be modified by the plugin. + BeginTransaction(batchSize int) *Transaction - // Reject returns the batch, acquired from Batch(), to the buffer and marks it - // as unsent. 
- Reject([]telegraf.Metric) + // Flush ends a metric and persists the buffer state + EndTransaction(*Transaction) - // Stats returns the buffer statistics such as rejected, dropped and accepred metrics + // Stats returns the buffer statistics such as rejected, dropped and accepted metrics Stats() BufferStats // Close finalizes the buffer and closes all open resources @@ -45,11 +86,12 @@ type Buffer interface { // BufferStats holds common metrics used for buffer implementations. // Implementations of Buffer should embed this struct in them. type BufferStats struct { - MetricsAdded selfstat.Stat - MetricsWritten selfstat.Stat - MetricsDropped selfstat.Stat - BufferSize selfstat.Stat - BufferLimit selfstat.Stat + MetricsAdded selfstat.Stat + MetricsWritten selfstat.Stat + MetricsRejected selfstat.Stat + MetricsDropped selfstat.Stat + BufferSize selfstat.Stat + BufferLimit selfstat.Stat } // NewBuffer returns a new empty Buffer with the given capacity. @@ -84,6 +126,11 @@ func NewBufferStats(name, alias string, capacity int) BufferStats { "metrics_written", tags, ), + MetricsRejected: selfstat.Register( + "write", + "metrics_rejected", + tags, + ), MetricsDropped: selfstat.Register( "write", "metrics_dropped", @@ -115,6 +162,12 @@ func (b *BufferStats) metricWritten(m telegraf.Metric) { m.Accept() } +func (b *BufferStats) metricRejected(m telegraf.Metric) { + AgentMetricsRejected.Incr(1) + b.MetricsRejected.Incr(1) + m.Reject() +} + func (b *BufferStats) metricDropped(m telegraf.Metric) { AgentMetricsDropped.Incr(1) b.MetricsDropped.Incr(1) diff --git a/models/buffer_disk.go b/models/buffer_disk.go index 57836dbab9070..799ac24758cb1 100644 --- a/models/buffer_disk.go +++ b/models/buffer_disk.go @@ -5,6 +5,8 @@ import ( "fmt" "log" "path/filepath" + "slices" + "sort" "sync" "github.com/tidwall/wal" @@ -31,6 +33,11 @@ type DiskBuffer struct { // we have to do our best and track that the walfile "should" be empty, so that next // write, we can remove the invalid entry 
(also skipping this entry if it is being read). isEmpty bool + + // The mask contains offsets of metric already removed during a previous + // transaction. Metrics at those offsets should not be contained in new + // batches. + mask []int } func NewDiskBuffer(name, id, path string, stats BufferStats) (*DiskBuffer, error) { @@ -67,7 +74,11 @@ func (b *DiskBuffer) length() int { if b.isEmpty { return 0 } - // Special case for when the read index is zero, it must be empty (otherwise it would be >= 1) + + return b.entries() - len(b.mask) +} + +func (b *DiskBuffer) entries() int { if b.readIndex() == 0 { return 0 } @@ -121,28 +132,33 @@ func (b *DiskBuffer) addSingleMetric(m telegraf.Metric) bool { return false } -func (b *DiskBuffer) Batch(batchSize int) []telegraf.Metric { +func (b *DiskBuffer) BeginTransaction(batchSize int) *Transaction { b.Lock() defer b.Unlock() if b.length() == 0 { - // no metrics in the wal file, so return an empty array - return make([]telegraf.Metric, 0) + return &Transaction{} } b.batchFirst = b.readIndex() - var metrics []telegraf.Metric - b.batchSize = 0 + + metrics := make([]telegraf.Metric, 0, batchSize) + offsets := make([]int, 0, batchSize) readIndex := b.batchFirst endIndex := b.writeIndex() + offset := 0 for batchSize > 0 && readIndex < endIndex { data, err := b.file.Read(readIndex) if err != nil { panic(err) } readIndex++ + offset++ - m, err := metric.FromBytes(data) + if slices.Contains(b.mask, offset) { + // Metric is masked by a previous write and is scheduled for removal + continue + } // Validate that a tracking metric is from this instance of telegraf and skip ones from older instances. // A tracking metric can be skipped here because metric.Accept() is only called once data is successfully @@ -152,11 +168,12 @@ func (b *DiskBuffer) Batch(batchSize int) []telegraf.Metric { // - ErrSkipTracking: means that the tracking information was unable to be found for a tracking ID. 
// - Outside of range: means that the metric was guaranteed to be left over from the previous instance // as it was here when we opened the wal file in this instance. - if errors.Is(err, metric.ErrSkipTracking) { - // could not look up tracking information for metric, skip - continue - } + m, err := metric.FromBytes(data) if err != nil { + if errors.Is(err, metric.ErrSkipTracking) { + // could not look up tracking information for metric, skip + continue + } // non-recoverable error in deserialization, abort log.Printf("E! raw metric data: %v", data) panic(err) @@ -167,33 +184,82 @@ func (b *DiskBuffer) Batch(batchSize int) []telegraf.Metric { } metrics = append(metrics, m) + offsets = append(offsets, offset) b.batchSize++ batchSize-- } - return metrics + return &Transaction{Batch: metrics, valid: true, state: offsets} } -func (b *DiskBuffer) Accept(batch []telegraf.Metric) { +func (b *DiskBuffer) EndTransaction(tx *Transaction) { + if len(tx.Batch) == 0 { + return + } + + // Ignore invalid transactions and make sure they can only be finished once + if !tx.valid { + return + } + tx.valid = false + + // Get the metric offsets from the transaction + offsets := tx.state.([]int) + b.Lock() defer b.Unlock() - if b.batchSize == 0 || len(batch) == 0 { - // nothing to accept + // Mark metrics which should be removed in the internal mask + remove := make([]int, 0, len(tx.Accept)+len(tx.Reject)) + for _, idx := range tx.Accept { + b.metricWritten(tx.Batch[idx]) + remove = append(remove, offsets[idx]) + } + for _, idx := range tx.Reject { + b.metricRejected(tx.Batch[idx]) + remove = append(remove, offsets[idx]) + } + b.mask = append(b.mask, remove...) + sort.Ints(b.mask) + + // Remove the metrics that are marked for removal from the front of the + // WAL file. All other metrics must be kept. 
+ if len(b.mask) == 0 || b.mask[0] != 0 { + // Mask is empty or the first index is not the front of the file, so + // exit early as there is nothing to remove return } - for _, m := range batch { - b.metricWritten(m) + + // Determine up to which index we can remove the entries from the WAL file + var removeIdx int + for i, offset := range b.mask { + if offset != i { + break + } + removeIdx = offset } - if b.length() == len(batch) { - b.emptyFile() + + // Remove the metrics in front from the WAL file + b.isEmpty = b.entries()-removeIdx-1 <= 0 + if b.isEmpty { + // WAL files cannot be fully empty but need to contain at least one + // item to not throw an error + if err := b.file.TruncateFront(b.writeIndex()); err != nil { + log.Printf("E! batch length: %d, first: %d, size: %d", len(tx.Batch), b.batchFirst, b.batchSize) + panic(err) + } } else { - err := b.file.TruncateFront(b.batchFirst + uint64(len(batch))) - if err != nil { - log.Printf("E! batch length: %d, batchFirst: %d, batchSize: %d", len(batch), b.batchFirst, b.batchSize) + if err := b.file.TruncateFront(b.batchFirst + uint64(removeIdx+1)); err != nil { + log.Printf("E! 
batch length: %d, first: %d, size: %d", len(tx.Batch), b.batchFirst, b.batchSize) panic(err) } } + // Truncate the mask and update the relative offsets + b.mask = b.mask[:removeIdx] + for i := range b.mask { + b.mask[i] -= removeIdx + } + // check if the original end index is still valid, clear if not if b.originalEnd < b.readIndex() { b.originalEnd = 0 @@ -203,14 +269,6 @@ func (b *DiskBuffer) Accept(batch []telegraf.Metric) { b.BufferSize.Set(int64(b.length())) } -func (b *DiskBuffer) Reject(_ []telegraf.Metric) { - // very little to do here as the disk buffer retains metrics in - // the wal file until a call to accept - b.Lock() - defer b.Unlock() - b.resetBatch() -} - func (b *DiskBuffer) Stats() BufferStats { return b.BufferStats } @@ -238,14 +296,3 @@ func (b *DiskBuffer) handleEmptyFile() { } b.isEmpty = false } - -func (b *DiskBuffer) emptyFile() { - if b.isEmpty || b.length() == 0 { - return - } - if err := b.file.TruncateFront(b.writeIndex() - 1); err != nil { - log.Printf("E! 
writeIndex: %d, buffer len: %d", b.writeIndex(), b.length()) - panic(err) - } - b.isEmpty = true -} diff --git a/models/buffer_disk_test.go b/models/buffer_disk_test.go index 3f04ef86d6246..15ff25a73c42b 100644 --- a/models/buffer_disk_test.go +++ b/models/buffer_disk_test.go @@ -27,9 +27,9 @@ func TestDiskBufferRetainsTrackingInformation(t *testing.T) { defer buf.Close() buf.Add(mm) - - batch := buf.Batch(1) - buf.Accept(batch) + tx := buf.BeginTransaction(1) + tx.AcceptAll() + buf.EndTransaction(tx) require.Equal(t, 1, delivered) } @@ -85,11 +85,11 @@ func TestDiskBufferTrackingDroppedFromOldWal(t *testing.T) { buf.Stats().MetricsDropped.Set(0) defer buf.Close() - batch := buf.Batch(4) + tx := buf.BeginTransaction(4) // Check that the tracking metric is skipped expected := []telegraf.Metric{ metrics[0], metrics[1], metrics[2], metrics[4], } - testutil.RequireMetricsEqual(t, expected, batch) + testutil.RequireMetricsEqual(t, expected, tx.Batch) } diff --git a/models/buffer_mem.go b/models/buffer_mem.go index 7bba4744f4e07..3c2daa89c51a3 100644 --- a/models/buffer_mem.go +++ b/models/buffer_mem.go @@ -51,67 +51,67 @@ func (b *MemoryBuffer) Add(metrics ...telegraf.Metric) int { return dropped } -func (b *MemoryBuffer) Batch(batchSize int) []telegraf.Metric { +func (b *MemoryBuffer) BeginTransaction(batchSize int) *Transaction { b.Lock() defer b.Unlock() outLen := min(b.size, batchSize) - out := make([]telegraf.Metric, outLen) if outLen == 0 { - return out + return &Transaction{} } b.batchFirst = b.first b.batchSize = outLen - batchIndex := b.batchFirst - for i := range out { - out[i] = b.buf[batchIndex] + batch := make([]telegraf.Metric, outLen) + for i := range batch { + batch[i] = b.buf[batchIndex] b.buf[batchIndex] = nil batchIndex = b.next(batchIndex) } b.first = b.nextby(b.first, b.batchSize) b.size -= outLen - return out + return &Transaction{Batch: batch, valid: true} } -func (b *MemoryBuffer) Accept(batch []telegraf.Metric) { +func (b *MemoryBuffer) 
EndTransaction(tx *Transaction) { b.Lock() defer b.Unlock() - for _, m := range batch { - b.metricWritten(m) - } - - b.resetBatch() - b.BufferSize.Set(int64(b.length())) -} - -func (b *MemoryBuffer) Reject(batch []telegraf.Metric) { - b.Lock() - defer b.Unlock() - - if len(batch) == 0 { + // Ignore invalid transactions and make sure they can only be finished once + if !tx.valid { return } + tx.valid = false - free := b.cap - b.size - restore := min(len(batch), free) - skip := len(batch) - restore + // Accept metrics + for _, idx := range tx.Accept { + b.metricWritten(tx.Batch[idx]) + } - b.first = b.prevby(b.first, restore) - b.size = min(b.size+restore, b.cap) + // Reject metrics + for _, idx := range tx.Reject { + b.metricRejected(tx.Batch[idx]) + } - re := b.first + // Keep metrics + keep := tx.InferKeep() + if len(keep) > 0 { + restore := min(len(keep), b.cap-b.size) + b.first = b.prevby(b.first, restore) + b.size = min(b.size+restore, b.cap) + + // Restore the metrics that fit into the buffer + current := b.first + for i := 0; i < restore; i++ { + b.buf[current] = tx.Batch[keep[i]] + current = b.next(current) + } - // Copy metrics from the batch back into the buffer - for i := range batch { - if i < skip { - b.metricDropped(batch[i]) - } else { - b.buf[re] = batch[i] - re = b.next(re) + // Drop all remaining metrics + for i := restore; i < len(keep); i++ { + b.metricDropped(tx.Batch[keep[i]]) } } diff --git a/models/buffer_mem_test.go b/models/buffer_mem_test.go index 650bd3bf65c93..8a473fcb5a0a9 100644 --- a/models/buffer_mem_test.go +++ b/models/buffer_mem_test.go @@ -24,8 +24,9 @@ func TestMemoryBufferAcceptCallsMetricAccept(t *testing.T) { }, } buf.Add(mm, mm, mm) - batch := buf.Batch(2) - buf.Accept(batch) + tx := buf.BeginTransaction(2) + tx.AcceptAll() + buf.EndTransaction(tx) require.Equal(t, 2, accept) } diff --git a/models/buffer_suite_test.go b/models/buffer_suite_test.go index 99d008096373a..80ce63bdce95b 100644 --- a/models/buffer_suite_test.go 
+++ b/models/buffer_suite_test.go @@ -53,6 +53,7 @@ func (s *BufferSuiteTest) newTestBuffer(capacity int) Buffer { s.Require().NoError(err) buf.Stats().MetricsAdded.Set(0) buf.Stats().MetricsWritten.Set(0) + buf.Stats().MetricsRejected.Set(0) buf.Stats().MetricsDropped.Set(0) return buf } @@ -99,16 +100,16 @@ func (s *BufferSuiteTest) TestBufferBatchLenZero() { buf := s.newTestBuffer(5) defer buf.Close() - batch := buf.Batch(0) - s.Empty(batch) + tx := buf.BeginTransaction(0) + s.Empty(tx.Batch) } func (s *BufferSuiteTest) TestBufferBatchLenBufferEmpty() { buf := s.newTestBuffer(5) defer buf.Close() - batch := buf.Batch(2) - s.Empty(batch) + tx := buf.BeginTransaction(2) + s.Empty(tx.Batch) } func (s *BufferSuiteTest) TestBufferBatchLenUnderfill() { @@ -117,8 +118,8 @@ func (s *BufferSuiteTest) TestBufferBatchLenUnderfill() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m) - batch := buf.Batch(2) - s.Len(batch, 1) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 1) } func (s *BufferSuiteTest) TestBufferBatchLenFill() { @@ -127,8 +128,8 @@ func (s *BufferSuiteTest) TestBufferBatchLenFill() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m) - batch := buf.Batch(2) - s.Len(batch, 2) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 2) } func (s *BufferSuiteTest) TestBufferBatchLenExact() { @@ -137,8 +138,8 @@ func (s *BufferSuiteTest) TestBufferBatchLenExact() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m) - batch := buf.Batch(2) - s.Len(batch, 2) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 2) } func (s *BufferSuiteTest) TestBufferBatchLenLargerThanBuffer() { @@ -147,8 +148,8 @@ func (s *BufferSuiteTest) TestBufferBatchLenLargerThanBuffer() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - 
batch := buf.Batch(6) - s.Len(batch, 5) + tx := buf.BeginTransaction(6) + s.Len(tx.Batch, 5) } func (s *BufferSuiteTest) TestBufferBatchWrap() { @@ -157,11 +158,12 @@ func (s *BufferSuiteTest) TestBufferBatchWrap() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(2) - buf.Accept(batch) + tx := buf.BeginTransaction(2) + tx.AcceptAll() + buf.EndTransaction(tx) buf.Add(m, m) - batch = buf.Batch(5) - s.Len(batch, 5) + tx = buf.BeginTransaction(5) + s.Len(tx.Batch, 5) } func (s *BufferSuiteTest) TestBufferBatchLatest() { @@ -171,13 +173,13 @@ func (s *BufferSuiteTest) TestBufferBatchLatest() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferBatchLatestWrap() { @@ -193,13 +195,13 @@ func (s *BufferSuiteTest) TestBufferBatchLatestWrap() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0)), 
metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferMultipleBatch() { @@ -212,7 +214,7 @@ func (s *BufferSuiteTest) TestBufferMultipleBatch() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), @@ -220,14 +222,16 @@ func (s *BufferSuiteTest) TestBufferMultipleBatch() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), - }, batch) - buf.Accept(batch) - batch = buf.Batch(5) + }, tx.Batch) + tx.AcceptAll() + buf.EndTransaction(tx) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0)), - }, batch) - buf.Accept(batch) + }, tx.Batch) + tx.AcceptAll() + buf.EndTransaction(tx) } func (s *BufferSuiteTest) TestBufferRejectWithRoom() { @@ -237,14 +241,15 @@ func (s *BufferSuiteTest) TestBufferRejectWithRoom() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := 
buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), @@ -252,7 +257,7 @@ func (s *BufferSuiteTest) TestBufferRejectWithRoom() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectNothingNewFull() { @@ -264,12 +269,13 @@ func (s *BufferSuiteTest) TestBufferRejectNothingNewFull() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) - batch := buf.Batch(2) - buf.Reject(batch) + tx := buf.BeginTransaction(2) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), @@ -277,7 +283,7 @@ func (s *BufferSuiteTest) TestBufferRejectNothingNewFull() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), metric.New("cpu", map[string]string{}, 
map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectNoRoom() { @@ -291,18 +297,19 @@ func (s *BufferSuiteTest) TestBufferRejectNoRoom() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(3), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), @@ -310,7 +317,7 @@ func (s *BufferSuiteTest) TestBufferRejectNoRoom() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectRoomExact() { @@ -319,16 +326,17 @@ func (s 
*BufferSuiteTest) TestBufferRejectRoomExact() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), @@ -336,7 +344,7 @@ func (s *BufferSuiteTest) TestBufferRejectRoomExact() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectRoomOverwriteOld() { @@ -350,16 +358,17 @@ func (s *BufferSuiteTest) TestBufferRejectRoomOverwriteOld() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(1) + tx := buf.BeginTransaction(1) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", 
map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(1), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0)), @@ -367,7 +376,7 @@ func (s *BufferSuiteTest) TestBufferRejectRoomOverwriteOld() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectPartialRoom() { @@ -381,16 +390,17 @@ func (s *BufferSuiteTest) TestBufferRejectPartialRoom() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(2), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) 
testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), @@ -398,7 +408,7 @@ func (s *BufferSuiteTest) TestBufferRejectPartialRoom() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectNewMetricsWrapped() { @@ -412,7 +422,7 @@ func (s *BufferSuiteTest) TestBufferRejectNewMetricsWrapped() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) @@ -435,11 +445,12 @@ func (s *BufferSuiteTest) TestBufferRejectNewMetricsWrapped() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(15, 0))) // buffer: 13, 14, 15, 11, 12; batch: 2, 3 s.Equal(int64(8), buf.Stats().MetricsDropped.Get()) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(10), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(11, 0)), @@ -447,7 +458,7 @@ func (s *BufferSuiteTest) TestBufferRejectNewMetricsWrapped() { metric.New("cpu", map[string]string{}, 
map[string]interface{}{"value": 42.0}, time.Unix(13, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(14, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(15, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectWrapped() { @@ -467,16 +478,17 @@ func (s *BufferSuiteTest) TestBufferRejectWrapped() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0))) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(9, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(10, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(11, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(12, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0)), @@ -484,7 +496,7 @@ func (s *BufferSuiteTest) TestBufferRejectWrapped() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(10, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(11, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(12, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectAdjustFirst() { @@ -498,36 +510,39 @@ func (s *BufferSuiteTest) TestBufferRejectAdjustFirst() { 
buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(9, 0))) - batch = buf.Batch(3) + tx = buf.BeginTransaction(3) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(10, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(11, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(12, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(13, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(14, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(15, 0))) - batch = buf.Batch(3) + tx = buf.BeginTransaction(3) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(16, 0))) buf.Add(metric.New("cpu", 
map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(17, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(18, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(19, 0))) - batch = buf.Batch(10) + tx = buf.BeginTransaction(10) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(10, 0)), @@ -540,7 +555,7 @@ func (s *BufferSuiteTest) TestBufferRejectAdjustFirst() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(17, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(18, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(19, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferAddDropsOverwrittenMetrics() { @@ -565,8 +580,9 @@ func (s *BufferSuiteTest) TestBufferAcceptRemovesBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m) - batch := buf.Batch(2) - buf.Accept(batch) + tx := buf.BeginTransaction(2) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(1, buf.Len()) } @@ -576,8 +592,9 @@ func (s *BufferSuiteTest) TestBufferRejectLeavesBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m) - batch := buf.Batch(2) - buf.Reject(batch) + tx := buf.BeginTransaction(2) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(3, buf.Len()) } @@ -587,9 +604,10 @@ func (s *BufferSuiteTest) TestBufferAcceptWritesOverwrittenBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) buf.Add(m, m, m, m, m) - 
buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get()) s.Equal(int64(5), buf.Stats().MetricsWritten.Get()) @@ -605,9 +623,10 @@ func (s *BufferSuiteTest) TestBufferBatchRejectDropsOverwrittenBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) buf.Add(m, m, m, m, m) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(5), buf.Stats().MetricsDropped.Get()) s.Equal(int64(0), buf.Stats().MetricsWritten.Get()) @@ -619,9 +638,10 @@ func (s *BufferSuiteTest) TestBufferMetricsOverwriteBatchAccept() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(m, m, m) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get(), "dropped") s.Equal(int64(3), buf.Stats().MetricsWritten.Get(), "written") } @@ -636,9 +656,10 @@ func (s *BufferSuiteTest) TestBufferMetricsOverwriteBatchReject() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(m, m, m) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(3), buf.Stats().MetricsDropped.Get()) s.Equal(int64(0), buf.Stats().MetricsWritten.Get()) } @@ -653,9 +674,10 @@ func (s *BufferSuiteTest) TestBufferMetricsBatchAcceptRemoved() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(m, m, m, m, m) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(int64(2), buf.Stats().MetricsDropped.Get()) s.Equal(int64(3), 
buf.Stats().MetricsWritten.Get()) } @@ -670,10 +692,10 @@ func (s *BufferSuiteTest) TestBufferWrapWithBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m) - buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(m, m, m, m, m, m) - s.Equal(int64(1), buf.Stats().MetricsDropped.Get()) + buf.EndTransaction(tx) } func (s *BufferSuiteTest) TestBufferBatchNotRemoved() { @@ -682,8 +704,9 @@ func (s *BufferSuiteTest) TestBufferBatchNotRemoved() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - buf.Batch(2) + tx := buf.BeginTransaction(2) s.Equal(5, buf.Len()) + buf.EndTransaction(tx) } func (s *BufferSuiteTest) TestBufferBatchRejectAcceptNoop() { @@ -692,9 +715,11 @@ func (s *BufferSuiteTest) TestBufferBatchRejectAcceptNoop() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(2) - buf.Reject(batch) - buf.Accept(batch) + tx := buf.BeginTransaction(2) + tx.KeepAll() + buf.EndTransaction(tx) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(5, buf.Len()) } @@ -734,10 +759,11 @@ func (s *BufferSuiteTest) TestBufferAddCallsMetricRejectWhenNotInBatch() { }, } buf.Add(mm, mm, mm, mm, mm) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(mm, mm, mm, mm) s.Equal(2, reject) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(4, reject) } @@ -757,10 +783,11 @@ func (s *BufferSuiteTest) TestBufferRejectCallsMetricRejectWithOverwritten() { }, } buf.Add(mm, mm, mm, mm, mm) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) buf.Add(mm, mm) s.Equal(0, reject) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(2, reject) } @@ -780,13 +807,14 @@ func (s *BufferSuiteTest) TestBufferAddOverwriteAndReject() { }, } buf.Add(mm, mm, mm, mm, mm) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) 
buf.Add(mm, mm, mm, mm, mm) buf.Add(mm, mm, mm, mm, mm) buf.Add(mm, mm, mm, mm, mm) buf.Add(mm, mm, mm, mm, mm) s.Equal(15, reject) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(20, reject) } @@ -812,7 +840,7 @@ func (s *BufferSuiteTest) TestBufferAddOverwriteAndRejectOffset() { buf.Add(mm, mm, mm) buf.Add(mm, mm, mm, mm) s.Equal(2, reject) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) buf.Add(mm, mm, mm, mm) s.Equal(2, reject) buf.Add(mm, mm, mm, mm) @@ -821,7 +849,8 @@ func (s *BufferSuiteTest) TestBufferAddOverwriteAndRejectOffset() { s.Equal(9, reject) buf.Add(mm, mm, mm, mm) s.Equal(13, reject) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(13, reject) s.Equal(5, accept) } @@ -830,14 +859,16 @@ func (s *BufferSuiteTest) TestBufferRejectEmptyBatch() { buf := s.newTestBuffer(5) defer buf.Close() - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) - batch = buf.Batch(2) - for _, m := range batch { + tx = buf.BeginTransaction(2) + for _, m := range tx.Batch { s.NotNil(m) } + buf.EndTransaction(tx) } func (s *BufferSuiteTest) TestBufferFlushedPartial() { @@ -847,10 +878,11 @@ func (s *BufferSuiteTest) TestBufferFlushedPartial() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) - s.Len(batch, 2) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 2) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(1, buf.Len()) } @@ -860,13 +892,48 @@ func (s 
*BufferSuiteTest) TestBufferFlushedFull() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) - batch := buf.Batch(2) - s.Len(batch, 2) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 2) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(0, buf.Len()) } +func (s *BufferSuiteTest) TestPartialWriteBackToFront() { + buf := s.newTestBuffer(5) + defer buf.Close() + + m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) + buf.Add(m, m, m, m, m) + + // Get a batch of all metrics but only reject the last one + tx := buf.BeginTransaction(5) + s.Len(tx.Batch, 5) + tx.Reject = []int{4} + buf.EndTransaction(tx) + s.Equal(4, buf.Len()) + + // Get the next batch which should miss the last metric + tx = buf.BeginTransaction(5) + s.Len(tx.Batch, 4) + tx.Accept = []int{3} + buf.EndTransaction(tx) + s.Equal(3, buf.Len()) + + // Now get the next batch and reject the remaining metrics + tx = buf.BeginTransaction(5) + s.Len(tx.Batch, 3) + tx.Accept = []int{0, 1, 2} + buf.EndTransaction(tx) + s.Equal(0, buf.Len()) + + s.Equal(int64(5), buf.Stats().MetricsAdded.Get(), "metrics added") + s.Equal(int64(4), buf.Stats().MetricsWritten.Get(), "metrics written") + s.Equal(int64(1), buf.Stats().MetricsRejected.Get(), "metrics rejected") + s.Equal(int64(0), buf.Stats().MetricsDropped.Get(), "metrics dropped") +} + type mockMetric struct { telegraf.Metric AcceptF func() diff --git a/models/running_output.go b/models/running_output.go index c8a730d572ba6..fd1622c4438de 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -301,22 +301,21 @@ func (r *RunningOutput) Write() error { atomic.StoreInt64(&r.newMetricsCount, 0) - // Only process the metrics in the buffer now. Metrics added while we are + // Only process the metrics in the buffer now. 
Metrics added while we are // writing will be sent on the next call. nBuffer := r.buffer.Len() nBatches := nBuffer/r.MetricBatchSize + 1 for i := 0; i < nBatches; i++ { - batch := r.buffer.Batch(r.MetricBatchSize) - if len(batch) == 0 { - break + tx := r.buffer.BeginTransaction(r.MetricBatchSize) + if len(tx.Batch) == 0 { + return nil } - - err := r.writeMetrics(batch) + err := r.writeMetrics(tx.Batch) + r.updateTransaction(tx, err) + r.buffer.EndTransaction(tx) if err != nil { - r.buffer.Reject(batch) return err } - r.buffer.Accept(batch) } return nil } @@ -334,19 +333,15 @@ func (r *RunningOutput) WriteBatch() error { r.log.Debugf("Successfully connected after %d attempts", r.retries) } - batch := r.buffer.Batch(r.MetricBatchSize) - if len(batch) == 0 { + tx := r.buffer.BeginTransaction(r.MetricBatchSize) + if len(tx.Batch) == 0 { return nil } + err := r.writeMetrics(tx.Batch) + r.updateTransaction(tx, err) + r.buffer.EndTransaction(tx) - err := r.writeMetrics(batch) - if err != nil { - r.buffer.Reject(batch) - return err - } - r.buffer.Accept(batch) - - return nil + return err } func (r *RunningOutput) writeMetrics(metrics []telegraf.Metric) error { @@ -367,6 +362,26 @@ func (r *RunningOutput) writeMetrics(metrics []telegraf.Metric) error { return err } +func (r *RunningOutput) updateTransaction(tx *Transaction, err error) { + // No error indicates all metrics were written successfully + if err == nil { + tx.AcceptAll() + return + } + + // A non-partial-write-error indicated none of the metrics were written + // successfully and we should keep them for the next write cycle + var writeErr *internal.PartialWriteError + if !errors.As(err, &writeErr) { + tx.KeepAll() + return + } + + // Transfer the accepted and rejected indices based on the write error values + tx.Accept = writeErr.MetricsAccept + tx.Reject = writeErr.MetricsReject +} + func (r *RunningOutput) LogBufferStatus() { nBuffer := r.buffer.Len() if r.Config.BufferStrategy == "disk" { diff --git 
a/models/running_output_test.go b/models/running_output_test.go index c045dcf0140f2..3c8b9e5951e1a 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -433,6 +433,7 @@ func TestRunningOutputInternalMetrics(t *testing.T) { "buffer_size": 0, "errors": 0, "metrics_added": 0, + "metrics_rejected": 0, "metrics_dropped": 0, "metrics_filtered": 0, "metrics_written": 0, From 0c74b68f55887e8771bb3441f250cdb9f4985c5b Mon Sep 17 00:00:00 2001 From: Mingyang Zheng Date: Thu, 5 Dec 2024 06:30:10 -0800 Subject: [PATCH 047/170] fix(logging): Clean up extra empty spaces when redirectLogger is used (#16255) --- logger/handler.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/logger/handler.go b/logger/handler.go index 6bc066658d183..76bf64a9c32d8 100644 --- a/logger/handler.go +++ b/logger/handler.go @@ -119,10 +119,15 @@ func (l *redirectLogger) Print(level telegraf.LogLevel, ts time.Time, prefix str for k, v := range attr { parts = append(parts, fmt.Sprintf("%s=%v", k, v)) } - attrMsg = " (" + strings.Join(parts, ",") + ")" + attrMsg = "(" + strings.Join(parts, ",") + ")" } - msg := append([]interface{}{ts.In(time.UTC).Format(time.RFC3339), " ", level.Indicator(), " ", prefix + attrMsg}, args...) + msg := []interface{}{ts.In(time.UTC).Format(time.RFC3339), level.Indicator(), prefix + attrMsg} + if prefix+attrMsg != "" { + msg = append(msg, prefix+attrMsg) + } + msg = append(msg, args...) + fmt.Fprintln(l.writer, msg...) 
} From 3fb767e4a7b67ee3dbd3e7a4362af30b72159307 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Dec 2024 08:30:42 -0600 Subject: [PATCH 048/170] chore(deps): Bump cloud.google.com/go/storage from 1.43.0 to 1.47.0 (#16235) --- docs/LICENSE_OF_DEPENDENCIES.md | 12 +++++++++ go.mod | 19 +++++++++++--- go.sum | 45 +++++++++++++++++++++++++++------ 3 files changed, 64 insertions(+), 12 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ae4696ad3cee3..b8994365b6154 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -3,6 +3,7 @@ When distributed in a binary form, Telegraf may contain portions of the following works: +- cel.dev/expr [Apache License 2.0](https://github.com/google/cel-spec/blob/master/LICENSE) - cloud.google.com/go [Apache License 2.0](https://github.com/googleapis/google-cloud-go/blob/master/LICENSE) - code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE) - collectd.org [ISC License](https://github.com/collectd/go-collectd/blob/master/LICENSE) @@ -27,6 +28,9 @@ following works: - github.com/Azure/go-ntlmssp [MIT License](https://github.com/Azure/go-ntlmssp/blob/master/LICENSE) - github.com/AzureAD/microsoft-authentication-library-for-go [MIT License](https://github.com/AzureAD/microsoft-authentication-library-for-go/blob/main/LICENSE) - github.com/ClickHouse/clickhouse-go [MIT License](https://github.com/ClickHouse/clickhouse-go/blob/master/LICENSE) +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp [Apache License 2.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE) +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric [Apache License 2.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE) +- 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping [Apache License 2.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE) - github.com/IBM/nzgo [MIT License](https://github.com/IBM/nzgo/blob/master/LICENSE.md) - github.com/IBM/sarama [MIT License](https://github.com/IBM/sarama/blob/master/LICENSE.md) - github.com/JohnCGriffin/overflow [MIT License](https://github.com/JohnCGriffin/overflow/blob/master/README.md) @@ -95,10 +99,12 @@ following works: - github.com/bufbuild/protocompile [Apache License 2.0](https://github.com/bufbuild/protocompile/blob/main/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) +- github.com/census-instrumentation/opencensus-proto [Apache License 2.0](https://github.com/census-instrumentation/opencensus-proto/blob/master/LICENSE) - github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/clarify/clarify-go [Apache License 2.0](https://github.com/clarify/clarify-go/blob/master/LICENSE) - github.com/cloudevents/sdk-go [Apache License 2.0](https://github.com/cloudevents/sdk-go/blob/main/LICENSE) +- github.com/cncf/xds/go [Apache License 2.0](https://github.com/cncf/xds/blob/main/LICENSE) - github.com/compose-spec/compose-go [Apache License 2.0](https://github.com/compose-spec/compose-go/blob/master/LICENSE) - github.com/containerd/log [Apache License 2.0](https://github.com/containerd/log/blob/main/LICENSE) - github.com/containerd/platforms [Apache License 2.0](https://github.com/containerd/platforms/blob/main/LICENSE) @@ -132,6 +138,8 @@ following works: - github.com/eclipse/paho.golang [Eclipse Public License - v 
2.0](https://github.com/eclipse/paho.golang/blob/master/LICENSE) - github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 2.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) - github.com/emicklei/go-restful [MIT License](https://github.com/emicklei/go-restful/blob/v3/LICENSE) +- github.com/envoyproxy/go-control-plane [Apache License 2.0](https://github.com/envoyproxy/go-control-plane/blob/main/LICENSE) +- github.com/envoyproxy/protoc-gen-validate [Apache License 2.0](https://github.com/bufbuild/protoc-gen-validate/blob/main/LICENSE) - github.com/facebook/time [Apache License 2.0](https://github.com/facebook/time/blob/main/LICENSE) - github.com/fatih/color [MIT License](https://github.com/fatih/color/blob/master/LICENSE.md) - github.com/felixge/httpsnoop [MIT License](https://github.com/felixge/httpsnoop/blob/master/LICENSE.txt) @@ -391,10 +399,13 @@ following works: - go.opentelemetry.io/collector/consumer [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) - go.opentelemetry.io/collector/pdata [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) - go.opentelemetry.io/collector/semconv [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) +- go.opentelemetry.io/contrib/detectors/gcp [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/LICENSE) - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/LICENSE) - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/LICENSE) - go.opentelemetry.io/otel [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) - go.opentelemetry.io/otel/metric [Apache License 
2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) +- go.opentelemetry.io/otel/sdk [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) +- go.opentelemetry.io/otel/sdk/metric [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) - go.opentelemetry.io/otel/trace [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) - go.opentelemetry.io/proto/otlp [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-proto-go/blob/main/LICENSE) - go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE) @@ -420,6 +431,7 @@ following works: - google.golang.org/genproto/googleapis/api [Apache License 2.0](https://pkg.go.dev/google.golang.org/genproto/googleapis/api?tab=licenses) - google.golang.org/genproto/googleapis/rpc [Apache License 2.0](https://pkg.go.dev/google.golang.org/genproto/googleapis/rpc?tab=licenses) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) +- google.golang.org/grpc/stats/opentelemetry [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) - google.golang.org/protobuf [BSD 3-Clause "New" or "Revised" License](https://pkg.go.dev/google.golang.org/protobuf?tab=licenses) - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) diff --git a/go.mod b/go.mod index 4f15e93118e2d..1c2ca658754f1 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go/bigquery v1.64.0 cloud.google.com/go/monitoring v1.21.1 cloud.google.com/go/pubsub v1.45.1 - cloud.google.com/go/storage v1.43.0 + cloud.google.com/go/storage v1.47.0 collectd.org v0.6.0 github.com/99designs/keyring v1.2.2 github.com/Azure/azure-event-hubs-go/v3 v3.6.2 @@ 
-208,7 +208,7 @@ require ( go.mongodb.org/mongo-driver v1.17.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 - go.opentelemetry.io/otel/sdk/metric v1.27.0 + go.opentelemetry.io/otel/sdk/metric v1.29.0 go.opentelemetry.io/proto/otlp v1.3.1 go.starlark.net v0.0.0-20240925182052-1207426daebd go.step.sm/crypto v0.54.0 @@ -238,9 +238,10 @@ require ( ) require ( + cel.dev/expr v0.16.1 // indirect cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.1 // indirect code.cloudfoundry.org/clock v1.0.0 // indirect @@ -264,6 +265,9 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect @@ -301,8 +305,10 @@ require ( github.com/caio/go-tdigest/v4 v4.0.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect + github.com/cncf/xds/go 
v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/couchbase/gomemcached v0.1.3 // indirect @@ -322,6 +328,8 @@ require ( github.com/ebitengine/purego v0.8.1 // indirect github.com/echlebek/timeproxy v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.4 // indirect @@ -442,6 +450,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pkg/sftp v1.13.6 // indirect github.com/pkg/xattr v0.4.10 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect @@ -488,6 +497,7 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/consumer v0.101.0 // indirect go.opentelemetry.io/collector/semconv v0.105.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect go.opentelemetry.io/otel v1.30.0 // indirect @@ -504,6 +514,7 @@ require ( golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 // indirect google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect gopkg.in/fatih/pool.v2 v2.0.0 // indirect 
gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 2200a9f3756ef..d6205d4a2feab 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -99,10 +101,10 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth v0.10.2 h1:oKF7rgBfSHdp/kuhXtqU/tNDr0mZqhYbEh+6SiqzkKo= +cloud.google.com/go/auth v0.10.2/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -355,6 +357,8 @@ 
cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6 cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= +cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= @@ -550,8 +554,8 @@ cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeL cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= -cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +cloud.google.com/go/storage v1.47.0 h1:ajqgt30fnOMmLfWfu1PWcb+V9Dxz6n+9WKjdNg5R4HM= +cloud.google.com/go/storage v1.47.0/go.mod h1:Ks0vP374w0PW6jOUameJbapbQKXqkjGd/OJRp2fb9IQ= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -571,6 +575,8 @@ cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg cloud.google.com/go/trace v1.4.0/go.mod 
h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= +cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -719,6 +725,14 @@ github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Files-com/files-sdk-go/v3 v3.2.34 h1:j6gSzu6BF1wWH1z4itRe7eKhQSCrx/I78SDNiBBUtvI= github.com/Files-com/files-sdk-go/v3 v3.2.34/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 
h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 h1:TBb4IxmBH0ssmWTUg0C6c9ZnfDmZospTF8f+YbHnbbA= @@ -996,6 +1010,7 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -1038,6 +1053,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= 
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/colinmarc/hdfs/v2 v2.4.0 h1:v6R8oBx/Wu9fHpdPoJJjpGSUxo8NhHIwrwsfhFvU9W0= @@ -1160,10 +1177,14 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc h1:0VQsg5ZXW9MPUxzemUHW7UBK8gfIO8K+YJGbdv4kBIM= github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc/go.mod h1:2UFAomOuD2vAK1x68czUtCVjAqmyWCEnAXOlmGqf+G0= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -2087,6 +2108,8 @@ github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp 
v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -2442,6 +2465,8 @@ go.opentelemetry.io/collector/pdata/testdata v0.101.0 h1:JzeUtg5RN1iIFgY8DakGlqB go.opentelemetry.io/collector/pdata/testdata v0.101.0/go.mod h1:ZGobfCus4fWo5RduZ7ENI0+HD9BewgKuO6qU2rBVnUg= go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= @@ -2454,12 +2479,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9RO go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod 
h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -3272,6 +3299,8 @@ google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpX google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/stats/opentelemetry 
v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 598079ad64f7ec14705a5e8478968df9f81b2872 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 5 Dec 2024 15:31:11 +0100 Subject: [PATCH 049/170] chore(deps): Bump golangci-lint from v1.62.0 to v1.62.2 (#16250) --- .circleci/config.yml | 6 +++--- .golangci.yml | 9 ++++----- Makefile | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9ba7e9c4fed15..20568a9c4284f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -106,7 +106,7 @@ jobs: - run: 'make check-deps' - run: name: "Install golangci-lint" - command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.0 + command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2 - run: name: "golangci-lint/Linux" # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number @@ -120,7 +120,7 @@ jobs: - check-changed-files-or-halt - run: name: "Install golangci-lint" - command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.0 + command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2 - run: name: "golangci-lint/macOS" # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number @@ -134,7 +134,7 @@ jobs: - check-changed-files-or-halt - run: name: "Install golangci-lint" - command: go install 
github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.0 + command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2 - run: name: "golangci-lint/Windows" # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number diff --git a/.golangci.yml b/.golangci.yml index 9821917770265..a7eab4390f758 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -284,10 +284,9 @@ linters-settings: - name: import-shadowing - name: increment-decrement - name: indent-error-flow -# Enable again when https://github.com/mgechev/revive/issues/1103 is fixed -# - name: max-public-structs -# exclude: [ "TEST" ] -# arguments: [ 5 ] + - name: max-public-structs + exclude: [ "TEST" ] + arguments: [ 5 ] - name: modifies-parameter - name: modifies-value-receiver - name: optimize-operands-order @@ -392,7 +391,7 @@ issues: text: "Use of weak random number generator" #gosec:G404 - path-except: ^plugins/(aggregators|inputs|outputs|parsers|processors|serializers)/... - text: "max-public-structs: you have exceeded the maximum number of public struct declarations" #revive:max-public-structs + text: "max-public-structs: you have exceeded the maximum number" #revive:max-public-structs # Independently of option `exclude` we use default exclude patterns, # it can be disabled by this option. 
diff --git a/Makefile b/Makefile index 543bd9ae2f537..71ac0a668c316 100644 --- a/Makefile +++ b/Makefile @@ -180,7 +180,7 @@ vet: .PHONY: lint-install lint-install: @echo "Installing golangci-lint" - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.0 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2 @echo "Installing markdownlint" npm install -g markdownlint-cli From 18cdb1a99e51b75003272b745e148e14d284947f Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 5 Dec 2024 15:32:10 +0100 Subject: [PATCH 050/170] chore(serializers)!: Remove old-style creation (#15971) --- CHANGELOG.md | 4 + config/config.go | 9 - config/config_test.go | 125 -------------- models/running_serializer.go | 5 +- plugins/outputs/amqp/amqp.go | 5 +- .../azure_data_explorer.go | 3 +- plugins/outputs/cloud_pubsub/cloud_pubsub.go | 5 +- plugins/outputs/event_hubs/event_hubs.go | 5 +- plugins/outputs/exec/exec.go | 5 +- plugins/outputs/execd/execd.go | 5 +- plugins/outputs/file/file.go | 5 +- plugins/outputs/http/http.go | 5 +- plugins/outputs/http/http_test.go | 3 +- plugins/outputs/kafka/kafka.go | 5 +- plugins/outputs/kinesis/kinesis.go | 5 +- plugins/outputs/kinesis/kinesis_test.go | 13 +- plugins/outputs/mqtt/mqtt.go | 5 +- plugins/outputs/nats/nats.go | 5 +- plugins/outputs/nsq/nsq.go | 5 +- .../outputs/socket_writer/socket_writer.go | 9 +- .../socket_writer/socket_writer_test.go | 12 +- plugins/outputs/stomp/stomp.go | 5 +- plugins/outputs/sumologic/sumologic.go | 7 +- plugins/outputs/websocket/websocket.go | 5 +- plugins/processors/execd/execd.go | 3 +- plugins/serializers/binary/binary.go | 2 +- plugins/serializers/carbon2/carbon2.go | 10 +- .../serializers/cloudevents/cloudevents.go | 2 +- plugins/serializers/csv/csv.go | 12 +- plugins/serializers/graphite/graphite.go | 14 +- plugins/serializers/influx/influx.go | 11 +- plugins/serializers/json/json.go | 13 +- plugins/serializers/msgpack/msgpack.go | 
7 +- plugins/serializers/nowmetric/nowmetric.go | 7 +- plugins/serializers/prometheus/prometheus.go | 12 +- .../prometheusremotewrite.go | 10 +- plugins/serializers/registry.go | 163 +----------------- .../serializers/splunkmetric/splunkmetric.go | 11 +- plugins/serializers/template/template.go | 2 +- plugins/serializers/wavefront/wavefront.go | 12 +- 40 files changed, 68 insertions(+), 483 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 32648b4f53bb4..ceae796d6ef68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,10 @@ delivery state update of un-parseable messages from `ACK` to `NACK` without requeueing. This way, those messages are not lost and can optionally be handled using a dead-letter exchange by other means. +- Removal of old-style serializer creation. This should not directly affect + users as it is an API change. All serializers in Telegraf are already ported + to the new framework. If you experience any issues with not being able to + create serializers let us know! 
### Bugfixes diff --git a/config/config.go b/config/config.go index d80033a6050f1..3ae2025313b4c 100644 --- a/config/config.go +++ b/config/config.go @@ -1212,15 +1212,6 @@ func (c *Config) addOutput(name string, table *ast.Table) error { return err } t.SetSerializer(serializer) - } else if t, ok := output.(serializers.SerializerOutput); ok { - // Keep the old interface for backward compatibility - // DEPRECATED: Please switch your plugin to telegraf.Serializers - missThreshold = 1 - serializer, err := c.addSerializer(name, table) - if err != nil { - return err - } - t.SetSerializer(serializer) } if t, ok := output.(telegraf.SerializerFuncPlugin); ok { diff --git a/config/config_test.go b/config/config_test.go index 7af2a790a5772..ace97545d0bf0 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -630,7 +630,6 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { require.NoError(t, c.LoadConfig("./testdata/serializers_new.toml")) require.Len(t, c.Outputs, len(formats)) - cfg := serializers.Config{} override := map[string]struct { param map[string]interface{} mask []string @@ -638,20 +637,12 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { expected := make([]telegraf.Serializer, 0, len(formats)) for _, format := range formats { - formatCfg := &cfg - formatCfg.DataFormat = format - logger := logging.New("serializers", format, "test") var serializer telegraf.Serializer if creator, found := serializers.Serializers[format]; found { t.Logf("new-style %q", format) serializer = creator() - } else { - t.Logf("old-style %q", format) - var err error - serializer, err = serializers.NewSerializer(formatCfg) - require.NoErrorf(t, err, "No serializer for format %q", format) } if settings, found := override[format]; found { @@ -703,98 +694,6 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { } } -func TestConfig_SerializerInterfaceOldFormat(t *testing.T) { - formats := []string{ - "carbon2", - "csv", - "graphite", - "influx", - 
"json", - "msgpack", - "nowmetric", - "prometheus", - "prometheusremotewrite", - "splunkmetric", - "wavefront", - } - - c := config.NewConfig() - require.NoError(t, c.LoadConfig("./testdata/serializers_old.toml")) - require.Len(t, c.Outputs, len(formats)) - - cfg := serializers.Config{} - override := map[string]struct { - param map[string]interface{} - mask []string - }{} - - expected := make([]telegraf.Serializer, 0, len(formats)) - for _, format := range formats { - formatCfg := &cfg - formatCfg.DataFormat = format - - logger := logging.New("serializers", format, "test") - - var serializer serializers.Serializer - if creator, found := serializers.Serializers[format]; found { - t.Logf("new-style %q", format) - serializer = creator() - } else { - t.Logf("old-style %q", format) - var err error - serializer, err = serializers.NewSerializer(formatCfg) - require.NoErrorf(t, err, "No serializer for format %q", format) - } - - if settings, found := override[format]; found { - s := reflect.Indirect(reflect.ValueOf(serializer)) - for key, value := range settings.param { - v := reflect.ValueOf(value) - s.FieldByName(key).Set(v) - } - } - models.SetLoggerOnPlugin(serializer, logger) - if s, ok := serializer.(telegraf.Initializer); ok { - require.NoError(t, s.Init()) - } - expected = append(expected, serializer) - } - require.Len(t, expected, len(formats)) - - actual := make([]interface{}, 0) - for _, plugin := range c.Outputs { - output, ok := plugin.Output.(*MockupOutputPluginSerializerOld) - require.True(t, ok) - // Get the parser set with 'SetParser()' - if p, ok := output.Serializer.(*models.RunningSerializer); ok { - actual = append(actual, p.Serializer) - } else { - actual = append(actual, output.Serializer) - } - } - require.Len(t, actual, len(formats)) - - for i, format := range formats { - // Determine the underlying type of the serializer - stype := reflect.Indirect(reflect.ValueOf(expected[i])).Interface() - // Ignore all unexported fields and fields not relevant 
for functionality - options := []cmp.Option{ - cmpopts.IgnoreUnexported(stype), - cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(serializers_prometheus.MetricTypes{})).Interface()), - cmpopts.IgnoreTypes(sync.Mutex{}, regexp.Regexp{}), - cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}), - } - if settings, found := override[format]; found { - options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) - } - - // Do a manual comparison as require.EqualValues will also work on unexported fields - // that cannot be cleared or ignored. - diff := cmp.Diff(expected[i], actual[i], options...) - require.Emptyf(t, diff, "Difference in SetSerializer() for %q", format) - } -} - func TestConfig_ParserInterface(t *testing.T) { formats := []string{ "collectd", @@ -1503,27 +1402,6 @@ func (m *MockupOutputPlugin) Write(_ []telegraf.Metric) error { return nil } -// Mockup OUTPUT plugin for serializer testing to avoid cyclic dependencies -type MockupOutputPluginSerializerOld struct { - Serializer serializers.Serializer -} - -func (m *MockupOutputPluginSerializerOld) SetSerializer(s serializers.Serializer) { - m.Serializer = s -} -func (*MockupOutputPluginSerializerOld) Connect() error { - return nil -} -func (*MockupOutputPluginSerializerOld) Close() error { - return nil -} -func (*MockupOutputPluginSerializerOld) SampleConfig() string { - return "Mockup test output plugin" -} -func (*MockupOutputPluginSerializerOld) Write(_ []telegraf.Metric) error { - return nil -} - type MockupOutputPluginSerializerNew struct { Serializer telegraf.Serializer } @@ -1662,7 +1540,4 @@ func init() { outputs.Add("serializer_test_new", func() telegraf.Output { return &MockupOutputPluginSerializerNew{} }) - outputs.Add("serializer_test_old", func() telegraf.Output { - return &MockupOutputPluginSerializerOld{} - }) } diff --git a/models/running_serializer.go b/models/running_serializer.go index d7d0217953363..e2efa0092c114 100644 --- a/models/running_serializer.go +++ 
b/models/running_serializer.go @@ -5,7 +5,6 @@ import ( "github.com/influxdata/telegraf" logging "github.com/influxdata/telegraf/logger" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/selfstat" ) @@ -19,7 +18,7 @@ type SerializerConfig struct { } type RunningSerializer struct { - Serializer serializers.Serializer + Serializer telegraf.Serializer Config *SerializerConfig log telegraf.Logger @@ -28,7 +27,7 @@ type RunningSerializer struct { SerializationTime selfstat.Stat } -func NewRunningSerializer(serializer serializers.Serializer, config *SerializerConfig) *RunningSerializer { +func NewRunningSerializer(serializer telegraf.Serializer, config *SerializerConfig) *RunningSerializer { tags := map[string]string{"type": config.DataFormat} if config.Alias != "" { tags["alias"] = config.Alias diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index b41fe32a216a9..4270027bfa2c7 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -17,7 +17,6 @@ import ( "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -67,7 +66,7 @@ type AMQP struct { tls.ClientConfig proxy.TCPProxy - serializer serializers.Serializer + serializer telegraf.Serializer connect func(*ClientConfig) (Client, error) client Client config *ClientConfig @@ -84,7 +83,7 @@ func (*AMQP) SampleConfig() string { return sampleConfig } -func (q *AMQP) SetSerializer(serializer serializers.Serializer) { +func (q *AMQP) SetSerializer(serializer telegraf.Serializer) { q.serializer = serializer } diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go index c89e56f9ca6e8..1c6cf4f1e8417 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer.go +++ 
b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -20,7 +20,6 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/json" ) @@ -36,7 +35,7 @@ type AzureDataExplorer struct { TableName string `toml:"table_name"` CreateTables bool `toml:"create_tables"` IngestionType string `toml:"ingestion_type"` - serializer serializers.Serializer + serializer telegraf.Serializer kustoClient *kusto.Client metricIngestors map[string]ingest.Ingestor } diff --git a/plugins/outputs/cloud_pubsub/cloud_pubsub.go b/plugins/outputs/cloud_pubsub/cloud_pubsub.go index 259ff37b6e77b..263a57f9826e8 100644 --- a/plugins/outputs/cloud_pubsub/cloud_pubsub.go +++ b/plugins/outputs/cloud_pubsub/cloud_pubsub.go @@ -18,7 +18,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -45,7 +44,7 @@ type PubSub struct { stubTopic func(id string) topic - serializer serializers.Serializer + serializer telegraf.Serializer publishResults []publishResult encoder internal.ContentEncoder } @@ -54,7 +53,7 @@ func (*PubSub) SampleConfig() string { return sampleConfig } -func (ps *PubSub) SetSerializer(serializer serializers.Serializer) { +func (ps *PubSub) SetSerializer(serializer telegraf.Serializer) { ps.serializer = serializer } diff --git a/plugins/outputs/event_hubs/event_hubs.go b/plugins/outputs/event_hubs/event_hubs.go index f7e0695ef7ab2..a5d0b6861d367 100644 --- a/plugins/outputs/event_hubs/event_hubs.go +++ b/plugins/outputs/event_hubs/event_hubs.go @@ -11,7 +11,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" - 
"github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -62,7 +61,7 @@ type EventHubs struct { Hub EventHubInterface batchOptions []eventhub.BatchOption - serializer serializers.Serializer + serializer telegraf.Serializer } const ( @@ -104,7 +103,7 @@ func (e *EventHubs) Close() error { return nil } -func (e *EventHubs) SetSerializer(serializer serializers.Serializer) { +func (e *EventHubs) SetSerializer(serializer telegraf.Serializer) { e.serializer = serializer } diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 08fed06e8b9ad..350c7357c5157 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -16,7 +16,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -33,7 +32,7 @@ type Exec struct { Log telegraf.Logger `toml:"-"` runner Runner - serializer serializers.Serializer + serializer telegraf.Serializer } func (*Exec) SampleConfig() string { @@ -47,7 +46,7 @@ func (e *Exec) Init() error { } // SetSerializer sets the serializer for the output. 
-func (e *Exec) SetSerializer(serializer serializers.Serializer) { +func (e *Exec) SetSerializer(serializer telegraf.Serializer) { e.serializer = serializer } diff --git a/plugins/outputs/execd/execd.go b/plugins/outputs/execd/execd.go index f764d142e4625..f76aaee76e8fa 100644 --- a/plugins/outputs/execd/execd.go +++ b/plugins/outputs/execd/execd.go @@ -14,7 +14,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/process" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -29,14 +28,14 @@ type Execd struct { Log telegraf.Logger process *process.Process - serializer serializers.Serializer + serializer telegraf.Serializer } func (*Execd) SampleConfig() string { return sampleConfig } -func (e *Execd) SetSerializer(s serializers.Serializer) { +func (e *Execd) SetSerializer(s telegraf.Serializer) { e.serializer = s } diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index b005026c97a12..8987a5441d397 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -13,7 +13,6 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -32,14 +31,14 @@ type File struct { encoder internal.ContentEncoder writer io.Writer closers []io.Closer - serializer serializers.Serializer + serializer telegraf.Serializer } func (*File) SampleConfig() string { return sampleConfig } -func (f *File) SetSerializer(serializer serializers.Serializer) { +func (f *File) SetSerializer(serializer telegraf.Serializer) { f.serializer = serializer } diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 3063535135f99..59d6f3d06f819 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -25,7 +25,6 @@ import ( common_aws 
"github.com/influxdata/telegraf/plugins/common/aws" common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -56,7 +55,7 @@ type HTTP struct { Log telegraf.Logger `toml:"-"` client *http.Client - serializer serializers.Serializer + serializer telegraf.Serializer awsCfg *aws.Config common_aws.CredentialConfig @@ -70,7 +69,7 @@ func (*HTTP) SampleConfig() string { return sampleConfig } -func (h *HTTP) SetSerializer(serializer serializers.Serializer) { +func (h *HTTP) SetSerializer(serializer telegraf.Serializer) { h.serializer = serializer } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 52ca1cd29a7b7..0bba3b23dd099 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -21,7 +21,6 @@ import ( common_aws "github.com/influxdata/telegraf/plugins/common/aws" common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/common/oauth" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" "github.com/influxdata/telegraf/testutil" @@ -735,7 +734,7 @@ func TestBatchedUnbatched(t *testing.T) { jsonSerializer := &json.Serializer{} require.NoError(t, jsonSerializer.Init()) - s := map[string]serializers.Serializer{ + s := map[string]telegraf.Serializer{ "influx": influxSerializer, "json": jsonSerializer, } diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 13e3de3b831ea..cec722cd1584e 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -16,7 +16,6 @@ import ( "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/outputs" - 
"github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -56,7 +55,7 @@ type Kafka struct { producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) producer sarama.SyncProducer - serializer serializers.Serializer + serializer telegraf.Serializer } type TopicSuffix struct { @@ -114,7 +113,7 @@ func (k *Kafka) GetTopicName(metric telegraf.Metric) (telegraf.Metric, string) { return metric, topicName } -func (k *Kafka) SetSerializer(serializer serializers.Serializer) { +func (k *Kafka) SetSerializer(serializer telegraf.Serializer) { k.serializer = serializer } diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index b8306efea9519..73c38e8a8b9b3 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -14,7 +14,6 @@ import ( "github.com/influxdata/telegraf" common_aws "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -32,7 +31,7 @@ type ( Debug bool `toml:"debug"` Log telegraf.Logger `toml:"-"` - serializer serializers.Serializer + serializer telegraf.Serializer svc kinesisClient common_aws.CredentialConfig @@ -86,7 +85,7 @@ func (k *KinesisOutput) Close() error { return nil } -func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) { +func (k *KinesisOutput) SetSerializer(serializer telegraf.Serializer) { k.serializer = serializer } diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 00f7f73c655ea..acc15d6734492 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" 
"github.com/influxdata/telegraf/testutil" ) @@ -566,11 +565,7 @@ func (m *mockKinesisPutRecords) AssertRequests( } } -func createTestMetric( - t *testing.T, - name string, - serializer serializers.Serializer, -) (telegraf.Metric, []byte) { +func createTestMetric(t *testing.T, name string, serializer telegraf.Serializer) (telegraf.Metric, []byte) { metric := testutil.TestMetric(1, name) data, err := serializer.Serialize(metric) @@ -579,11 +574,7 @@ func createTestMetric( return metric, data } -func createTestMetrics( - t *testing.T, - count uint32, - serializer serializers.Serializer, -) ([]telegraf.Metric, [][]byte) { +func createTestMetrics(t *testing.T, count uint32, serializer telegraf.Serializer) ([]telegraf.Metric, [][]byte) { metrics := make([]telegraf.Metric, 0, count) metricsData := make([][]byte, 0, count) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index df57c4414191d..9e710c8468184 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -14,7 +14,6 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/mqtt" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -36,7 +35,7 @@ type MQTT struct { mqtt.MqttConfig client mqtt.Client - serializer serializers.Serializer + serializer telegraf.Serializer generator *TopicNameGenerator homieDeviceNameGenerator *HomieGenerator @@ -118,7 +117,7 @@ func (m *MQTT) Connect() error { return err } -func (m *MQTT) SetSerializer(serializer serializers.Serializer) { +func (m *MQTT) SetSerializer(serializer telegraf.Serializer) { m.serializer = serializer } diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index ace7901fd6766..d00598d752115 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -17,7 +17,6 @@ import ( "github.com/influxdata/telegraf/internal/choice" 
"github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -39,7 +38,7 @@ type NATS struct { conn *nats.Conn jetstreamClient jetstream.JetStream jetstreamStreamConfig *jetstream.StreamConfig - serializer serializers.Serializer + serializer telegraf.Serializer } // StreamConfig is the configuration for creating stream @@ -83,7 +82,7 @@ func (*NATS) SampleConfig() string { return sampleConfig } -func (n *NATS) SetSerializer(serializer serializers.Serializer) { +func (n *NATS) SetSerializer(serializer telegraf.Serializer) { n.serializer = serializer } diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index 2c771b1919507..ee66d7f4e0853 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -9,7 +9,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -21,14 +20,14 @@ type NSQ struct { Log telegraf.Logger `toml:"-"` producer *nsq.Producer - serializer serializers.Serializer + serializer telegraf.Serializer } func (*NSQ) SampleConfig() string { return sampleConfig } -func (n *NSQ) SetSerializer(serializer serializers.Serializer) { +func (n *NSQ) SetSerializer(serializer telegraf.Serializer) { n.serializer = serializer } diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index b2aec04b97bc6..26e7ce60dd50f 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -19,7 +19,6 @@ import ( "github.com/influxdata/telegraf/internal" common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -32,7 +31,7 @@ type SocketWriter struct { 
common_tls.ClientConfig Log telegraf.Logger `toml:"-"` - serializers.Serializer + serializer telegraf.Serializer encoder internal.ContentEncoder @@ -43,8 +42,8 @@ func (*SocketWriter) SampleConfig() string { return sampleConfig } -func (sw *SocketWriter) SetSerializer(s serializers.Serializer) { - sw.Serializer = s +func (sw *SocketWriter) SetSerializer(s telegraf.Serializer) { + sw.serializer = s } func (sw *SocketWriter) Connect() error { @@ -141,7 +140,7 @@ func (sw *SocketWriter) Write(metrics []telegraf.Metric) error { } for _, m := range metrics { - bs, err := sw.Serialize(m) + bs, err := sw.serializer.Serialize(m) if err != nil { sw.Log.Debugf("Could not serialize metric: %v", err) continue diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 478d05a31ac78..c2de7a62f9709 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -19,7 +19,7 @@ func newSocketWriter(t *testing.T, addr string) *SocketWriter { require.NoError(t, serializer.Init()) return &SocketWriter{ Address: addr, - Serializer: serializer, + serializer: serializer, } } @@ -79,12 +79,12 @@ func TestSocketWriter_unixgram(t *testing.T) { func testSocketWriterStream(t *testing.T, sw *SocketWriter, lconn net.Conn) { metrics := []telegraf.Metric{testutil.TestMetric(1, "test")} - mbs1out, err := sw.Serialize(metrics[0]) + mbs1out, err := sw.serializer.Serialize(metrics[0]) require.NoError(t, err) mbs1out, err = sw.encoder.Encode(mbs1out) require.NoError(t, err) metrics = append(metrics, testutil.TestMetric(2, "test")) - mbs2out, err := sw.Serialize(metrics[1]) + mbs2out, err := sw.serializer.Serialize(metrics[1]) require.NoError(t, err) mbs2out, err = sw.encoder.Encode(mbs2out) require.NoError(t, err) @@ -104,13 +104,13 @@ func testSocketWriterStream(t *testing.T, sw *SocketWriter, lconn net.Conn) { func testSocketWriterPacket(t *testing.T, sw *SocketWriter, 
lconn net.PacketConn) { metrics := []telegraf.Metric{testutil.TestMetric(1, "test")} - mbs1out, err := sw.Serialize(metrics[0]) + mbs1out, err := sw.serializer.Serialize(metrics[0]) require.NoError(t, err) mbs1out, err = sw.encoder.Encode(mbs1out) require.NoError(t, err) mbs1str := string(mbs1out) metrics = append(metrics, testutil.TestMetric(2, "test")) - mbs2out, err := sw.Serialize(metrics[1]) + mbs2out, err := sw.serializer.Serialize(metrics[1]) require.NoError(t, err) mbs2out, err = sw.encoder.Encode(mbs2out) require.NoError(t, err) @@ -191,7 +191,7 @@ func TestSocketWriter_Write_reconnect(t *testing.T) { wg.Wait() require.NoError(t, lerr) - mbsout, err := sw.Serialize(metrics[0]) + mbsout, err := sw.serializer.Serialize(metrics[0]) require.NoError(t, err) buf := make([]byte, 256) n, err := lconn.Read(buf) diff --git a/plugins/outputs/stomp/stomp.go b/plugins/outputs/stomp/stomp.go index 7e12762d0b600..4433abee8c678 100644 --- a/plugins/outputs/stomp/stomp.go +++ b/plugins/outputs/stomp/stomp.go @@ -14,7 +14,6 @@ import ( "github.com/influxdata/telegraf/config" common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -35,7 +34,7 @@ type STOMP struct { conn net.Conn stomp *stomp.Conn - serialize serializers.Serializer + serialize telegraf.Serializer } func (q *STOMP) Connect() error { @@ -71,7 +70,7 @@ func (q *STOMP) Connect() error { return nil } -func (q *STOMP) SetSerializer(serializer serializers.Serializer) { +func (q *STOMP) SetSerializer(serializer telegraf.Serializer) { q.serialize = serializer } diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index 37e4798da7e35..a01033fb39c8b 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -15,7 +15,6 @@ import ( "github.com/influxdata/telegraf/internal" 
"github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/carbon2" "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/prometheus" @@ -57,7 +56,7 @@ type SumoLogic struct { Log telegraf.Logger `toml:"-"` client *http.Client - serializer serializers.Serializer + serializer telegraf.Serializer headers map[string]string } @@ -66,7 +65,7 @@ func (*SumoLogic) SampleConfig() string { return sampleConfig } -func (s *SumoLogic) SetSerializer(serializer serializers.Serializer) { +func (s *SumoLogic) SetSerializer(serializer telegraf.Serializer) { s.serializer = serializer } @@ -82,7 +81,7 @@ func (s *SumoLogic) createClient() *http.Client { func (s *SumoLogic) Connect() error { s.headers = make(map[string]string) - var serializer serializers.Serializer + var serializer telegraf.Serializer if unwrapped, ok := s.serializer.(*models.RunningSerializer); ok { serializer = unwrapped.Serializer } else { diff --git a/plugins/outputs/websocket/websocket.go b/plugins/outputs/websocket/websocket.go index f772c159c0e05..c8767c592100b 100644 --- a/plugins/outputs/websocket/websocket.go +++ b/plugins/outputs/websocket/websocket.go @@ -16,7 +16,6 @@ import ( "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -42,7 +41,7 @@ type WebSocket struct { tls.ClientConfig conn *ws.Conn - serializer serializers.Serializer + serializer telegraf.Serializer } func (*WebSocket) SampleConfig() string { @@ -50,7 +49,7 @@ func (*WebSocket) SampleConfig() string { } // SetSerializer implements serializers.SerializerOutput. 
-func (w *WebSocket) SetSerializer(serializer serializers.Serializer) { +func (w *WebSocket) SetSerializer(serializer telegraf.Serializer) { w.serializer = serializer } diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go index 0407226f9fdda..481ffd35f0db8 100644 --- a/plugins/processors/execd/execd.go +++ b/plugins/processors/execd/execd.go @@ -15,7 +15,6 @@ import ( "github.com/influxdata/telegraf/internal/process" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/processors" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -28,7 +27,7 @@ type Execd struct { Log telegraf.Logger parser telegraf.Parser - serializer serializers.Serializer + serializer telegraf.Serializer acc telegraf.Accumulator process *process.Process } diff --git a/plugins/serializers/binary/binary.go b/plugins/serializers/binary/binary.go index 754a1b1710a3a..ce4148813b2be 100644 --- a/plugins/serializers/binary/binary.go +++ b/plugins/serializers/binary/binary.go @@ -101,7 +101,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func init() { serializers.Add("binary", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go index 5bd3c250348b4..827301fa7c8b6 100644 --- a/plugins/serializers/carbon2/carbon2.go +++ b/plugins/serializers/carbon2/carbon2.go @@ -112,16 +112,8 @@ func (s *Serializer) createObject(metric telegraf.Metric) []byte { func init() { serializers.Add("carbon2", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.Format = cfg.Carbon2Format - s.SanitizeReplaceChar = cfg.Carbon2SanitizeReplaceChar - - return nil -} 
diff --git a/plugins/serializers/cloudevents/cloudevents.go b/plugins/serializers/cloudevents/cloudevents.go index 0112d9fe4c084..a5dc2dcc54659 100644 --- a/plugins/serializers/cloudevents/cloudevents.go +++ b/plugins/serializers/cloudevents/cloudevents.go @@ -192,7 +192,7 @@ func (s *Serializer) createEvent(m telegraf.Metric) (*cloudevents.Event, error) func init() { serializers.Add("cloudevents", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) diff --git a/plugins/serializers/csv/csv.go b/plugins/serializers/csv/csv.go index d9ba94f74db9d..7931270f4b567 100644 --- a/plugins/serializers/csv/csv.go +++ b/plugins/serializers/csv/csv.go @@ -238,18 +238,8 @@ func (s *Serializer) writeDataOrdered(metric telegraf.Metric) error { func init() { serializers.Add("csv", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.TimestampFormat = cfg.TimestampFormat - s.Separator = cfg.CSVSeparator - s.Header = cfg.CSVHeader - s.Prefix = cfg.CSVPrefix - - return nil -} diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index cb84ae47ff837..60dc318278822 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -355,20 +355,8 @@ func compatibleSanitize(name, value string) string { func init() { serializers.Add("graphite", - func() serializers.Serializer { + func() telegraf.Serializer { return &GraphiteSerializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *GraphiteSerializer) InitFromConfig(cfg *serializers.Config) error { - s.Prefix = cfg.Prefix - s.Templates = cfg.Templates - s.StrictRegex = cfg.GraphiteStrictRegex - s.TagSupport = cfg.GraphiteTagSupport - s.TagSanitizeMode = 
cfg.GraphiteTagSanitizeMode - s.Separator = cfg.GraphiteSeparator - - return nil -} diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index 191fa7e516957..cfe7631a261a9 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -328,17 +328,8 @@ func appendStringField(buf []byte, value string) []byte { func init() { serializers.Add("influx", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.MaxLineBytes = cfg.InfluxMaxLineBytes - s.SortFields = cfg.InfluxSortFields - s.UintSupport = cfg.InfluxUintSupport - - return nil -} diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index e293089886c81..c91281868209a 100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -162,19 +162,8 @@ func (s *Serializer) transform(obj interface{}) (interface{}, error) { func init() { serializers.Add("json", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.TimestampUnits = config.Duration(cfg.TimestampUnits) - s.TimestampFormat = cfg.TimestampFormat - s.Transformation = cfg.Transformation - s.NestedFieldsInclude = cfg.JSONNestedFieldInclude - s.NestedFieldsExclude = cfg.JSONNestedFieldExclude - - return nil -} diff --git a/plugins/serializers/msgpack/msgpack.go b/plugins/serializers/msgpack/msgpack.go index fef37e7c056ea..a1bb7346fc82d 100644 --- a/plugins/serializers/msgpack/msgpack.go +++ b/plugins/serializers/msgpack/msgpack.go @@ -40,13 +40,8 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func init() { 
serializers.Add("msgpack", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(_ *serializers.Config) error { - return nil -} diff --git a/plugins/serializers/nowmetric/nowmetric.go b/plugins/serializers/nowmetric/nowmetric.go index fd2782959c457..d25e90276e3a1 100644 --- a/plugins/serializers/nowmetric/nowmetric.go +++ b/plugins/serializers/nowmetric/nowmetric.go @@ -130,13 +130,8 @@ func verifyValue(v interface{}) bool { func init() { serializers.Add("nowmetric", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(_ *serializers.Config) error { - return nil -} diff --git a/plugins/serializers/prometheus/prometheus.go b/plugins/serializers/prometheus/prometheus.go index e2a7b34e8543a..21dc136c70524 100644 --- a/plugins/serializers/prometheus/prometheus.go +++ b/plugins/serializers/prometheus/prometheus.go @@ -88,18 +88,8 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func init() { serializers.Add("prometheus", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.FormatConfig.CompactEncoding = cfg.PrometheusCompactEncoding - s.FormatConfig.SortMetrics = cfg.PrometheusSortMetrics - s.FormatConfig.StringAsLabel = cfg.PrometheusStringAsLabel - s.FormatConfig.ExportTimestamp = cfg.PrometheusExportTimestamp - - return nil -} diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index 3f281eaed75a5..413f876cc963e 100644 --- 
a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -350,16 +350,8 @@ func (sl sortableLabels) Swap(i, j int) { func init() { serializers.Add("prometheusremotewrite", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.SortMetrics = cfg.PrometheusSortMetrics - s.StringAsLabel = cfg.PrometheusStringAsLabel - - return nil -} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 03b9e9d06aa1b..27881d04e11aa 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -1,14 +1,9 @@ package serializers -import ( - "fmt" - "time" - - "github.com/influxdata/telegraf" -) +import "github.com/influxdata/telegraf" // Creator is the function to create a new serializer -type Creator func() Serializer +type Creator func() telegraf.Serializer // Serializers contains the registry of all known serializers (following the new style) var Serializers = make(map[string]Creator) @@ -17,157 +12,3 @@ var Serializers = make(map[string]Creator) func Add(name string, creator Creator) { Serializers[name] = creator } - -// SerializerOutput is an interface for output plugins that are able to -// serialize telegraf metrics into arbitrary data formats. -type SerializerOutput interface { - // SetSerializer sets the serializer function for the interface. - SetSerializer(serializer Serializer) -} - -// Serializer is an interface defining functions that a serializer plugin must -// satisfy. -// -// Implementations of this interface should be reentrant but are not required -// to be thread-safe. -type Serializer interface { - // Serialize takes a single telegraf metric and turns it into a byte buffer. 
- // separate metrics should be separated by a newline, and there should be - // a newline at the end of the buffer. - // - // New plugins should use SerializeBatch instead to allow for non-line - // delimited metrics. - Serialize(metric telegraf.Metric) ([]byte, error) - - // SerializeBatch takes an array of telegraf metric and serializes it into - // a byte buffer. This method is not required to be suitable for use with - // line oriented framing. - SerializeBatch(metrics []telegraf.Metric) ([]byte, error) -} - -// SerializerCompatibility is an interface for backward-compatible initialization of serializers -type SerializerCompatibility interface { - // InitFromConfig sets the serializers internal variables from the old-style config - InitFromConfig(config *Config) error -} - -// Config is a struct that covers the data types needed for all serializer types, -// and can be used to instantiate _any_ of the serializers. -type Config struct { - // DataFormat can be one of the serializer types listed in NewSerializer. - DataFormat string `toml:"data_format"` - - // Carbon2 metric format. - Carbon2Format string `toml:"carbon2_format"` - - // Character used for metric name sanitization in Carbon2. 
- Carbon2SanitizeReplaceChar string `toml:"carbon2_sanitize_replace_char"` - - // Separator for CSV - CSVSeparator string `toml:"csv_separator"` - - // Output a CSV header for naming the columns - CSVHeader bool `toml:"csv_header"` - - // Prefix the tag and field columns for CSV format - CSVPrefix bool `toml:"csv_column_prefix"` - - // Support tags in graphite protocol - GraphiteTagSupport bool `toml:"graphite_tag_support"` - - // Support tags which follow the spec - GraphiteTagSanitizeMode string `toml:"graphite_tag_sanitize_mode"` - - // Character for separating metric name and field for Graphite tags - GraphiteSeparator string `toml:"graphite_separator"` - - // Regex string - GraphiteStrictRegex string `toml:"graphite_strict_sanitize_regex"` - - // Maximum line length in bytes; influx format only - InfluxMaxLineBytes int `toml:"influx_max_line_bytes"` - - // Sort field keys, set to true only when debugging as it less performant - // than unsorted fields; influx format only - InfluxSortFields bool `toml:"influx_sort_fields"` - - // Support unsigned integer output; influx format only - InfluxUintSupport bool `toml:"influx_uint_support"` - - // Omit timestamp from output; influx format only - InfluxOmitTimestamp bool `toml:"influx_omit_timestamp"` - - // Prefix to add to all measurements, only supports Graphite - Prefix string `toml:"prefix"` - - // Template for converting telegraf metrics into Graphite - // only supports Graphite - Template string `toml:"template"` - - // Templates same Template, but multiple - Templates []string `toml:"templates"` - - // Timestamp units to use for JSON formatted output - TimestampUnits time.Duration `toml:"timestamp_units"` - - // Timestamp format to use for JSON and CSV formatted output - TimestampFormat string `toml:"timestamp_format"` - - // Transformation as JSONata expression to use for JSON formatted output - Transformation string `toml:"transformation"` - - // Field filter for interpreting data as nested JSON for JSON 
serializer - JSONNestedFieldInclude []string `toml:"json_nested_fields_include"` - JSONNestedFieldExclude []string `toml:"json_nested_fields_exclude"` - - // Include HEC routing fields for splunkmetric output - HecRouting bool `toml:"hec_routing"` - - // Enable Splunk MultiMetric output (Splunk 8.0+) - SplunkmetricMultiMetric bool `toml:"splunkmetric_multi_metric"` - - // Omit the Splunk Event "metric" tag - SplunkmetricOmitEventTag bool `toml:"splunkmetric_omit_event_tag"` - - // Point tags to use as the source name for Wavefront (if none found, host will be used). - WavefrontSourceOverride []string `toml:"wavefront_source_override"` - - // Use Strict rules to sanitize metric and tag names from invalid characters for Wavefront - // When enabled forward slash (/) and comma (,) will be accepted - WavefrontUseStrict bool `toml:"wavefront_use_strict"` - - // Convert "_" in prefixes to "." for Wavefront - WavefrontDisablePrefixConversion bool `toml:"wavefront_disable_prefix_conversion"` - - // Include the metric timestamp on each sample. - PrometheusExportTimestamp bool `toml:"prometheus_export_timestamp"` - - // Sort prometheus metric families and metric samples. Useful for - // debugging. - PrometheusSortMetrics bool `toml:"prometheus_sort_metrics"` - - // Output string fields as metric labels; when false string fields are - // discarded. - PrometheusStringAsLabel bool `toml:"prometheus_string_as_label"` - - // Encode metrics without HELP metadata. This helps reduce the payload size. - PrometheusCompactEncoding bool `toml:"prometheus_compact_encoding"` -} - -// NewSerializer a Serializer interface based on the given config. -func NewSerializer(config *Config) (Serializer, error) { - creator, found := Serializers[config.DataFormat] - if !found { - return nil, fmt.Errorf("invalid data format: %s", config.DataFormat) - } - - // Try to create new-style serializers the old way... 
- serializer := creator() - p, ok := serializer.(SerializerCompatibility) - if !ok { - return nil, fmt.Errorf("serializer for %q cannot be created the old way", config.DataFormat) - } - err := p.InitFromConfig(config) - - return serializer, err -} diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index 4e34ba573d2f6..4eb8b982ae373 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -210,17 +210,8 @@ func verifyValue(v interface{}) (value interface{}, valid bool) { func init() { serializers.Add("splunkmetric", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.HecRouting = cfg.HecRouting - s.MultiMetric = cfg.SplunkmetricMultiMetric - s.OmitEventTag = cfg.SplunkmetricOmitEventTag - - return nil -} diff --git a/plugins/serializers/template/template.go b/plugins/serializers/template/template.go index 0527fd4a9ba20..39ea176e864a7 100644 --- a/plugins/serializers/template/template.go +++ b/plugins/serializers/template/template.go @@ -95,7 +95,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func init() { serializers.Add("template", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go index f38c97589d298..e326a85d00997 100644 --- a/plugins/serializers/wavefront/wavefront.go +++ b/plugins/serializers/wavefront/wavefront.go @@ -189,18 +189,8 @@ func (b *buffer) WriteFloat64(val float64) { func init() { serializers.Add("wavefront", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to 
construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.Prefix = cfg.Prefix - s.UseStrict = cfg.WavefrontUseStrict - s.SourceOverride = cfg.WavefrontSourceOverride - s.DisablePrefixConversions = cfg.WavefrontDisablePrefixConversion - - return nil -} From 7dc0e18223ecc3ef31843c5b39e629e3b620e778 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 5 Dec 2024 17:35:28 +0100 Subject: [PATCH 051/170] chore: Fix linter findings for `revive:exported` in `plugins/inputs/o*` (#16224) --- plugins/inputs/opcua/opcua.go | 12 +- plugins/inputs/opcua/opcua_test.go | 106 ++++++------- plugins/inputs/opcua/read_client.go | 26 +-- .../inputs/opcua_listener/opcua_listener.go | 43 +++-- .../opcua_listener/opcua_listener_test.go | 148 +++++++++--------- .../inputs/opcua_listener/subscribe_client.go | 20 +-- plugins/inputs/openldap/openldap.go | 82 +++++----- plugins/inputs/openntpd/openntpd.go | 93 ++++++----- plugins/inputs/openntpd/openntpd_test.go | 14 +- .../opensearch_query/aggregation.bucket.go | 14 +- .../inputs/opensearch_query/aggregation.go | 16 +- .../opensearch_query/aggregation.metric.go | 4 +- .../opensearch_query/aggregation.response.go | 12 +- .../opensearch_query/opensearch_query.go | 90 +++++------ .../opensearch_query/opensearch_query_test.go | 17 +- plugins/inputs/opensearch_query/query.go | 8 +- plugins/inputs/opensmtpd/opensmtpd.go | 63 ++++---- plugins/inputs/opensmtpd/opensmtpd_test.go | 4 +- plugins/inputs/openstack/openstack.go | 33 ++-- plugins/inputs/opentelemetry/opentelemetry.go | 8 +- .../opentelemetry/opentelemetry_test.go | 2 +- .../inputs/openweathermap/openweathermap.go | 6 +- plugins/inputs/openweathermap/types.go | 10 +- 23 files changed, 407 insertions(+), 424 deletions(-) diff --git a/plugins/inputs/opcua/opcua.go b/plugins/inputs/opcua/opcua.go index dd0109adc5f8a..839b3d99cabe2 100644 --- a/plugins/inputs/opcua/opcua.go +++ b/plugins/inputs/opcua/opcua.go @@ -16,26 
+16,24 @@ import ( var sampleConfig string type OpcUA struct { - ReadClientConfig + readClientConfig Log telegraf.Logger `toml:"-"` - client *ReadClient + client *readClient } func (*OpcUA) SampleConfig() string { return sampleConfig } -// Init Initialise all required objects func (o *OpcUA) Init() (err error) { - o.client, err = o.ReadClientConfig.CreateReadClient(o.Log) + o.client, err = o.readClientConfig.createReadClient(o.Log) return err } -// Gather defines what data the plugin will gather. func (o *OpcUA) Gather(acc telegraf.Accumulator) error { // Will (re)connect if the client is disconnected - metrics, err := o.client.CurrentValues() + metrics, err := o.client.currentValues() if err != nil { return err } @@ -51,7 +49,7 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("opcua", func() telegraf.Input { return &OpcUA{ - ReadClientConfig: ReadClientConfig{ + readClientConfig: readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", diff --git a/plugins/inputs/opcua/opcua_test.go b/plugins/inputs/opcua/opcua_test.go index ee18778a9141d..2c0bc559128a7 100644 --- a/plugins/inputs/opcua/opcua_test.go +++ b/plugins/inputs/opcua/opcua_test.go @@ -19,19 +19,19 @@ import ( const servicePort = "4840" -type OPCTags struct { - Name string - Namespace string - IdentifierType string - Identifier string - Want interface{} +type opcTags struct { + name string + namespace string + identifierType string + identifier string + want interface{} } -func MapOPCTag(tags OPCTags) (out input.NodeSettings) { - out.FieldName = tags.Name - out.Namespace = tags.Namespace - out.IdentifierType = tags.IdentifierType - out.Identifier = tags.Identifier +func mapOPCTag(tags opcTags) (out input.NodeSettings) { + out.FieldName = tags.name + out.Namespace = tags.namespace + out.IdentifierType = tags.identifierType + out.Identifier = tags.identifier return out } @@ -52,13 
+52,13 @@ func TestGetDataBadNodeContainerIntegration(t *testing.T) { require.NoError(t, err, "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "1", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, } - readConfig := ReadClientConfig{ + readConfig := readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -83,14 +83,14 @@ func TestGetDataBadNodeContainerIntegration(t *testing.T) { } for _, tags := range testopctags { - g.Nodes = append(g.Nodes, MapOPCTag(tags)) + g.Nodes = append(g.Nodes, mapOPCTag(tags)) } readConfig.Groups = append(readConfig.Groups, g) logger := &testutil.CaptureLogger{} - readClient, err := readConfig.CreateReadClient(logger) + readClient, err := readConfig.createReadClient(logger) require.NoError(t, err) - err = readClient.Connect() + err = readClient.connect() require.NoError(t, err) } @@ -111,7 +111,7 @@ func TestReadClientIntegration(t *testing.T) { require.NoError(t, err, "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, @@ -120,7 +120,7 @@ func TestReadClientIntegration(t *testing.T) { {"DateTime", "1", "i", "51037", "0001-01-01T00:00:00Z"}, } - readConfig := ReadClientConfig{ + readConfig := readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -138,17 +138,17 @@ func TestReadClientIntegration(t *testing.T) { } for _, tags := range testopctags 
{ - readConfig.RootNodes = append(readConfig.RootNodes, MapOPCTag(tags)) + readConfig.RootNodes = append(readConfig.RootNodes, mapOPCTag(tags)) } - client, err := readConfig.CreateReadClient(testutil.Logger{}) + client, err := readConfig.createReadClient(testutil.Logger{}) require.NoError(t, err) - err = client.Connect() - require.NoError(t, err, "Connect") + err = client.connect() + require.NoError(t, err) for i, v := range client.LastReceivedData { - require.Equal(t, testopctags[i].Want, v.Value) + require.Equal(t, testopctags[i].want, v.Value) } } @@ -168,7 +168,7 @@ func TestReadClientIntegrationAdditionalFields(t *testing.T) { require.NoError(t, container.Start(), "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, @@ -196,17 +196,17 @@ func TestReadClientIntegrationAdditionalFields(t *testing.T) { for i, x := range testopctags { now := time.Now() tags := map[string]string{ - "id": fmt.Sprintf("ns=%s;%s=%s", x.Namespace, x.IdentifierType, x.Identifier), + "id": fmt.Sprintf("ns=%s;%s=%s", x.namespace, x.identifierType, x.identifier), } fields := map[string]interface{}{ - x.Name: x.Want, + x.name: x.want, "Quality": testopcquality[i], "DataType": testopctypes[i], } expectedopcmetrics = append(expectedopcmetrics, metric.New("testing", tags, fields, now)) } - readConfig := ReadClientConfig{ + readConfig := readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -225,13 +225,13 @@ func TestReadClientIntegrationAdditionalFields(t *testing.T) { } for _, tags := range testopctags { - readConfig.RootNodes = append(readConfig.RootNodes, MapOPCTag(tags)) + readConfig.RootNodes = 
append(readConfig.RootNodes, mapOPCTag(tags)) } - client, err := readConfig.CreateReadClient(testutil.Logger{}) + client, err := readConfig.createReadClient(testutil.Logger{}) require.NoError(t, err) - require.NoError(t, client.Connect()) + require.NoError(t, client.connect()) actualopcmetrics := make([]telegraf.Metric, 0, len(client.LastReceivedData)) for i := range client.LastReceivedData { @@ -258,13 +258,13 @@ func TestReadClientIntegrationWithPasswordAuth(t *testing.T) { require.NoError(t, err, "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, } - readConfig := ReadClientConfig{ + readConfig := readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -284,17 +284,17 @@ func TestReadClientIntegrationWithPasswordAuth(t *testing.T) { } for _, tags := range testopctags { - readConfig.RootNodes = append(readConfig.RootNodes, MapOPCTag(tags)) + readConfig.RootNodes = append(readConfig.RootNodes, mapOPCTag(tags)) } - client, err := readConfig.CreateReadClient(testutil.Logger{}) + client, err := readConfig.createReadClient(testutil.Logger{}) require.NoError(t, err) - err = client.Connect() - require.NoError(t, err, "Connect") + err = client.connect() + require.NoError(t, err) for i, v := range client.LastReceivedData { - require.Equal(t, testopctags[i].Want, v.Value) + require.Equal(t, testopctags[i].want, v.Value) } } @@ -369,17 +369,17 @@ use_unregistered_reads = true o, ok := c.Inputs[0].Input.(*OpcUA) require.True(t, ok) - require.Equal(t, "localhost", o.ReadClientConfig.MetricName) - require.Equal(t, "opc.tcp://localhost:4840", o.ReadClientConfig.Endpoint) - require.Equal(t, 
config.Duration(10*time.Second), o.ReadClientConfig.ConnectTimeout) - require.Equal(t, config.Duration(5*time.Second), o.ReadClientConfig.RequestTimeout) - require.Equal(t, "auto", o.ReadClientConfig.SecurityPolicy) - require.Equal(t, "auto", o.ReadClientConfig.SecurityMode) - require.Equal(t, "/etc/telegraf/cert.pem", o.ReadClientConfig.Certificate) - require.Equal(t, "/etc/telegraf/key.pem", o.ReadClientConfig.PrivateKey) - require.Equal(t, "Anonymous", o.ReadClientConfig.AuthMethod) - require.True(t, o.ReadClientConfig.Username.Empty()) - require.True(t, o.ReadClientConfig.Password.Empty()) + require.Equal(t, "localhost", o.readClientConfig.MetricName) + require.Equal(t, "opc.tcp://localhost:4840", o.readClientConfig.Endpoint) + require.Equal(t, config.Duration(10*time.Second), o.readClientConfig.ConnectTimeout) + require.Equal(t, config.Duration(5*time.Second), o.readClientConfig.RequestTimeout) + require.Equal(t, "auto", o.readClientConfig.SecurityPolicy) + require.Equal(t, "auto", o.readClientConfig.SecurityMode) + require.Equal(t, "/etc/telegraf/cert.pem", o.readClientConfig.Certificate) + require.Equal(t, "/etc/telegraf/key.pem", o.readClientConfig.PrivateKey) + require.Equal(t, "Anonymous", o.readClientConfig.AuthMethod) + require.True(t, o.readClientConfig.Username.Empty()) + require.True(t, o.readClientConfig.Password.Empty()) require.Equal(t, []input.NodeSettings{ { FieldName: "name", @@ -396,7 +396,7 @@ use_unregistered_reads = true TagsSlice: [][]string{{"tag0", "val0"}, {"tag00", "val00"}}, DefaultTags: map[string]string{"tag6": "val6"}, }, - }, o.ReadClientConfig.RootNodes) + }, o.readClientConfig.RootNodes) require.Equal(t, []input.NodeGroupSettings{ { MetricName: "foo", @@ -424,10 +424,10 @@ use_unregistered_reads = true Identifier: "4001", }}, }, - }, o.ReadClientConfig.Groups) - require.Equal(t, opcua.OpcUAWorkarounds{AdditionalValidStatusCodes: []string{"0xC0"}}, o.ReadClientConfig.Workarounds) - require.Equal(t, 
ReadClientWorkarounds{UseUnregisteredReads: true}, o.ReadClientConfig.ReadClientWorkarounds) - require.Equal(t, []string{"DataType"}, o.ReadClientConfig.OptionalFields) + }, o.readClientConfig.Groups) + require.Equal(t, opcua.OpcUAWorkarounds{AdditionalValidStatusCodes: []string{"0xC0"}}, o.readClientConfig.Workarounds) + require.Equal(t, readClientWorkarounds{UseUnregisteredReads: true}, o.readClientConfig.ReadClientWorkarounds) + require.Equal(t, []string{"DataType"}, o.readClientConfig.OptionalFields) err = o.Init() require.NoError(t, err) require.Len(t, o.client.NodeMetricMapping, 5, "incorrect number of nodes") diff --git a/plugins/inputs/opcua/read_client.go b/plugins/inputs/opcua/read_client.go index 25d8a3f9576e3..f8e04e02ea568 100644 --- a/plugins/inputs/opcua/read_client.go +++ b/plugins/inputs/opcua/read_client.go @@ -15,33 +15,33 @@ import ( "github.com/influxdata/telegraf/selfstat" ) -type ReadClientWorkarounds struct { +type readClientWorkarounds struct { UseUnregisteredReads bool `toml:"use_unregistered_reads"` } -type ReadClientConfig struct { +type readClientConfig struct { ReadRetryTimeout config.Duration `toml:"read_retry_timeout"` ReadRetries uint64 `toml:"read_retry_count"` - ReadClientWorkarounds ReadClientWorkarounds `toml:"request_workarounds"` + ReadClientWorkarounds readClientWorkarounds `toml:"request_workarounds"` input.InputClientConfig } -// ReadClient Requests the current values from the required nodes when gather is called. -type ReadClient struct { +// readClient Requests the current values from the required nodes when gather is called. 
+type readClient struct { *input.OpcUAInputClient ReadRetryTimeout time.Duration ReadRetries uint64 ReadSuccess selfstat.Stat ReadError selfstat.Stat - Workarounds ReadClientWorkarounds + Workarounds readClientWorkarounds // internal values reqIDs []*ua.ReadValueID ctx context.Context } -func (rc *ReadClientConfig) CreateReadClient(log telegraf.Logger) (*ReadClient, error) { +func (rc *readClientConfig) createReadClient(log telegraf.Logger) (*readClient, error) { inputClient, err := rc.InputClientConfig.CreateInputClient(log) if err != nil { return nil, err @@ -55,7 +55,7 @@ func (rc *ReadClientConfig) CreateReadClient(log telegraf.Logger) (*ReadClient, rc.ReadRetryTimeout = config.Duration(100 * time.Millisecond) } - return &ReadClient{ + return &readClient{ OpcUAInputClient: inputClient, ReadRetryTimeout: time.Duration(rc.ReadRetryTimeout), ReadRetries: rc.ReadRetries, @@ -65,7 +65,7 @@ func (rc *ReadClientConfig) CreateReadClient(log telegraf.Logger) (*ReadClient, }, nil } -func (o *ReadClient) Connect() error { +func (o *readClient) connect() error { o.ctx = context.Background() if err := o.OpcUAClient.Connect(o.ctx); err != nil { @@ -103,14 +103,14 @@ func (o *ReadClient) Connect() error { return nil } -func (o *ReadClient) ensureConnected() error { +func (o *readClient) ensureConnected() error { if o.State() == opcua.Disconnected || o.State() == opcua.Closed { - return o.Connect() + return o.connect() } return nil } -func (o *ReadClient) CurrentValues() ([]telegraf.Metric, error) { +func (o *readClient) currentValues() ([]telegraf.Metric, error) { if err := o.ensureConnected(); err != nil { return nil, err } @@ -142,7 +142,7 @@ func (o *ReadClient) CurrentValues() ([]telegraf.Metric, error) { return metrics, nil } -func (o *ReadClient) read() error { +func (o *readClient) read() error { req := &ua.ReadRequest{ MaxAge: 2000, TimestampsToReturn: ua.TimestampsToReturnBoth, diff --git a/plugins/inputs/opcua_listener/opcua_listener.go 
b/plugins/inputs/opcua_listener/opcua_listener.go index 9399f9c971869..6085c90c9f94c 100644 --- a/plugins/inputs/opcua_listener/opcua_listener.go +++ b/plugins/inputs/opcua_listener/opcua_listener.go @@ -15,8 +15,8 @@ import ( ) type OpcUaListener struct { - SubscribeClientConfig - client *SubscribeClient + subscribeClientConfig + client *subscribeClient Log telegraf.Logger `toml:"-"` } @@ -36,20 +36,35 @@ func (o *OpcUaListener) Init() (err error) { default: return fmt.Errorf("unknown setting %q for 'connect_fail_behavior'", o.ConnectFailBehavior) } - o.client, err = o.SubscribeClientConfig.CreateSubscribeClient(o.Log) + o.client, err = o.subscribeClientConfig.createSubscribeClient(o.Log) return err } +func (o *OpcUaListener) Start(acc telegraf.Accumulator) error { + return o.connect(acc) +} + func (o *OpcUaListener) Gather(acc telegraf.Accumulator) error { - if o.client.State() == opcua.Connected || o.SubscribeClientConfig.ConnectFailBehavior == "ignore" { + if o.client.State() == opcua.Connected || o.subscribeClientConfig.ConnectFailBehavior == "ignore" { return nil } return o.connect(acc) } +func (o *OpcUaListener) Stop() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + select { + case <-o.client.stop(ctx): + o.Log.Infof("Unsubscribed OPC UA successfully") + case <-ctx.Done(): // Timeout context + o.Log.Warn("Timeout while stopping OPC UA subscription") + } + cancel() +} + func (o *OpcUaListener) connect(acc telegraf.Accumulator) error { ctx := context.Background() - ch, err := o.client.StartStreamValues(ctx) + ch, err := o.client.startStreamValues(ctx) if err != nil { return err } @@ -68,26 +83,10 @@ func (o *OpcUaListener) connect(acc telegraf.Accumulator) error { return nil } -func (o *OpcUaListener) Start(acc telegraf.Accumulator) error { - return o.connect(acc) -} - -func (o *OpcUaListener) Stop() { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - select { - case <-o.client.Stop(ctx): - 
o.Log.Infof("Unsubscribed OPC UA successfully") - case <-ctx.Done(): // Timeout context - o.Log.Warn("Timeout while stopping OPC UA subscription") - } - cancel() -} - -// Add this plugin to telegraf func init() { inputs.Add("opcua_listener", func() telegraf.Input { return &OpcUaListener{ - SubscribeClientConfig: SubscribeClientConfig{ + subscribeClientConfig: subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", diff --git a/plugins/inputs/opcua_listener/opcua_listener_test.go b/plugins/inputs/opcua_listener/opcua_listener_test.go index 86c384b76d709..5484252bbb56d 100644 --- a/plugins/inputs/opcua_listener/opcua_listener_test.go +++ b/plugins/inputs/opcua_listener/opcua_listener_test.go @@ -21,25 +21,25 @@ import ( const servicePort = "4840" -type OPCTags struct { - Name string - Namespace string - IdentifierType string - Identifier string - Want interface{} +type opcTags struct { + name string + namespace string + identifierType string + identifier string + want interface{} } -func MapOPCTag(tags OPCTags) (out input.NodeSettings) { - out.FieldName = tags.Name - out.Namespace = tags.Namespace - out.IdentifierType = tags.IdentifierType - out.Identifier = tags.Identifier +func mapOPCTag(tags opcTags) (out input.NodeSettings) { + out.FieldName = tags.name + out.Namespace = tags.namespace + out.IdentifierType = tags.identifierType + out.Identifier = tags.identifier return out } func TestInitPluginWithBadConnectFailBehaviorValue(t *testing.T) { plugin := OpcUaListener{ - SubscribeClientConfig: SubscribeClientConfig{ + subscribeClientConfig: subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://notarealserver:4840", @@ -69,7 +69,7 @@ func TestStartPlugin(t *testing.T) { acc := &testutil.Accumulator{} plugin := OpcUaListener{ - SubscribeClientConfig: SubscribeClientConfig{ + subscribeClientConfig: 
subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://notarealserver:4840", @@ -86,17 +86,17 @@ func TestStartPlugin(t *testing.T) { }, Log: testutil.Logger{}, } - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, } for _, tags := range testopctags { - plugin.SubscribeClientConfig.RootNodes = append(plugin.SubscribeClientConfig.RootNodes, MapOPCTag(tags)) + plugin.subscribeClientConfig.RootNodes = append(plugin.subscribeClientConfig.RootNodes, mapOPCTag(tags)) } require.NoError(t, plugin.Init()) err := plugin.Start(acc) require.ErrorContains(t, err, "could not resolve address") - plugin.SubscribeClientConfig.ConnectFailBehavior = "ignore" + plugin.subscribeClientConfig.ConnectFailBehavior = "ignore" require.NoError(t, plugin.Init()) require.NoError(t, plugin.Start(acc)) require.Equal(t, opcua.Disconnected, plugin.client.OpcUAClient.State()) @@ -110,7 +110,7 @@ func TestStartPlugin(t *testing.T) { wait.ForLog("TCP network layer listening on opc.tcp://"), ), } - plugin.SubscribeClientConfig.ConnectFailBehavior = "retry" + plugin.subscribeClientConfig.ConnectFailBehavior = "retry" require.NoError(t, plugin.Init()) require.NoError(t, plugin.Start(acc)) require.Equal(t, opcua.Disconnected, plugin.client.OpcUAClient.State()) @@ -144,7 +144,7 @@ func TestSubscribeClientIntegration(t *testing.T) { require.NoError(t, err, "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, @@ -154,12 +154,12 @@ func TestSubscribeClientIntegration(t *testing.T) { } tagsRemaining := make([]string, 0, len(testopctags)) for i, tag := range testopctags { - if tag.Want != nil { - tagsRemaining = append(tagsRemaining, 
testopctags[i].Name) + if tag.want != nil { + tagsRemaining = append(tagsRemaining, testopctags[i].name) } } - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -177,9 +177,9 @@ func TestSubscribeClientIntegration(t *testing.T) { SubscriptionInterval: 0, } for _, tags := range testopctags { - subscribeConfig.RootNodes = append(subscribeConfig.RootNodes, MapOPCTag(tags)) + subscribeConfig.RootNodes = append(subscribeConfig.RootNodes, mapOPCTag(tags)) } - o, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + o, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.NoError(t, err) // give initial setup a couple extra attempts, as on CircleCI this can be @@ -188,12 +188,12 @@ func TestSubscribeClientIntegration(t *testing.T) { return o.SetupOptions() == nil }, 5*time.Second, 10*time.Millisecond) - err = o.Connect() + err = o.connect() require.NoError(t, err, "Connection failed") ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - res, err := o.StartStreamValues(ctx) + res, err := o.startStreamValues(ctx) require.Equal(t, opcua.Connected, o.State()) require.NoError(t, err) @@ -202,16 +202,16 @@ func TestSubscribeClientIntegration(t *testing.T) { case m := <-res: for fieldName, fieldValue := range m.Fields() { for _, tag := range testopctags { - if fieldName != tag.Name { + if fieldName != tag.name { continue } - if tag.Want == nil { - t.Errorf("Tag: %s has value: %v", tag.Name, fieldValue) + if tag.want == nil { + t.Errorf("Tag: %s has value: %v", tag.name, fieldValue) return } - require.Equal(t, tag.Want, fieldValue) + require.Equal(t, tag.want, fieldValue) newRemaining := make([]string, 0, len(tagsRemaining)) for _, remainingTag := range tagsRemaining { @@ -257,7 +257,7 @@ func 
TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { require.NoError(t, container.Start(), "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, @@ -285,10 +285,10 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { for i, x := range testopctags { now := time.Now() tags := map[string]string{ - "id": fmt.Sprintf("ns=%s;%s=%s", x.Namespace, x.IdentifierType, x.Identifier), + "id": fmt.Sprintf("ns=%s;%s=%s", x.namespace, x.identifierType, x.identifier), } fields := map[string]interface{}{ - x.Name: x.Want, + x.name: x.want, "Quality": testopcquality[i], "DataType": testopctypes[i], } @@ -297,12 +297,12 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { tagsRemaining := make([]string, 0, len(testopctags)) for i, tag := range testopctags { - if tag.Want != nil { - tagsRemaining = append(tagsRemaining, testopctags[i].Name) + if tag.want != nil { + tagsRemaining = append(tagsRemaining, testopctags[i].name) } } - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -321,9 +321,9 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { SubscriptionInterval: 0, } for _, tags := range testopctags { - subscribeConfig.RootNodes = append(subscribeConfig.RootNodes, MapOPCTag(tags)) + subscribeConfig.RootNodes = append(subscribeConfig.RootNodes, mapOPCTag(tags)) } - o, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + o, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.NoError(t, err) // give initial setup a couple extra attempts, as on 
CircleCI this can be @@ -332,11 +332,11 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { return o.SetupOptions() == nil }, 5*time.Second, 10*time.Millisecond) - require.NoError(t, o.Connect(), "Connection failed") + require.NoError(t, o.connect(), "Connection failed") ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - res, err := o.StartStreamValues(ctx) + res, err := o.startStreamValues(ctx) require.NoError(t, err) for { @@ -344,12 +344,12 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { case m := <-res: for fieldName, fieldValue := range m.Fields() { for _, tag := range testopctags { - if fieldName != tag.Name { + if fieldName != tag.name { continue } // nil-value tags should not be sent from server, error if one does - if tag.Want == nil { - t.Errorf("Tag: %s has value: %v", tag.Name, fieldValue) + if tag.want == nil { + t.Errorf("Tag: %s has value: %v", tag.name, fieldValue) return } @@ -434,19 +434,19 @@ additional_valid_status_codes = ["0xC0"] o, ok := c.Inputs[0].Input.(*OpcUaListener) require.True(t, ok) - require.Equal(t, "localhost", o.SubscribeClientConfig.MetricName) - require.Equal(t, "opc.tcp://localhost:4840", o.SubscribeClientConfig.Endpoint) - require.Equal(t, config.Duration(10*time.Second), o.SubscribeClientConfig.ConnectTimeout) - require.Equal(t, config.Duration(5*time.Second), o.SubscribeClientConfig.RequestTimeout) - require.Equal(t, config.Duration(200*time.Millisecond), o.SubscribeClientConfig.SubscriptionInterval) - require.Equal(t, "error", o.SubscribeClientConfig.ConnectFailBehavior) - require.Equal(t, "auto", o.SubscribeClientConfig.SecurityPolicy) - require.Equal(t, "auto", o.SubscribeClientConfig.SecurityMode) - require.Equal(t, "/etc/telegraf/cert.pem", o.SubscribeClientConfig.Certificate) - require.Equal(t, "/etc/telegraf/key.pem", o.SubscribeClientConfig.PrivateKey) - require.Equal(t, "Anonymous", o.SubscribeClientConfig.AuthMethod) - require.True(t, 
o.SubscribeClientConfig.Username.Empty()) - require.True(t, o.SubscribeClientConfig.Password.Empty()) + require.Equal(t, "localhost", o.subscribeClientConfig.MetricName) + require.Equal(t, "opc.tcp://localhost:4840", o.subscribeClientConfig.Endpoint) + require.Equal(t, config.Duration(10*time.Second), o.subscribeClientConfig.ConnectTimeout) + require.Equal(t, config.Duration(5*time.Second), o.subscribeClientConfig.RequestTimeout) + require.Equal(t, config.Duration(200*time.Millisecond), o.subscribeClientConfig.SubscriptionInterval) + require.Equal(t, "error", o.subscribeClientConfig.ConnectFailBehavior) + require.Equal(t, "auto", o.subscribeClientConfig.SecurityPolicy) + require.Equal(t, "auto", o.subscribeClientConfig.SecurityMode) + require.Equal(t, "/etc/telegraf/cert.pem", o.subscribeClientConfig.Certificate) + require.Equal(t, "/etc/telegraf/key.pem", o.subscribeClientConfig.PrivateKey) + require.Equal(t, "Anonymous", o.subscribeClientConfig.AuthMethod) + require.True(t, o.subscribeClientConfig.Username.Empty()) + require.True(t, o.subscribeClientConfig.Password.Empty()) require.Equal(t, []input.NodeSettings{ { FieldName: "name", @@ -460,7 +460,7 @@ additional_valid_status_codes = ["0xC0"] IdentifierType: "s", Identifier: "two", }, - }, o.SubscribeClientConfig.RootNodes) + }, o.subscribeClientConfig.RootNodes) require.Equal(t, []input.NodeGroupSettings{ { MetricName: "foo", @@ -484,9 +484,9 @@ additional_valid_status_codes = ["0xC0"] TagsSlice: [][]string{{"tag1", "override"}}, }}, }, - }, o.SubscribeClientConfig.Groups) - require.Equal(t, opcua.OpcUAWorkarounds{AdditionalValidStatusCodes: []string{"0xC0"}}, o.SubscribeClientConfig.Workarounds) - require.Equal(t, []string{"DataType"}, o.SubscribeClientConfig.OptionalFields) + }, o.subscribeClientConfig.Groups) + require.Equal(t, opcua.OpcUAWorkarounds{AdditionalValidStatusCodes: []string{"0xC0"}}, o.subscribeClientConfig.Workarounds) + require.Equal(t, []string{"DataType"}, 
o.subscribeClientConfig.OptionalFields) } func TestSubscribeClientConfigWithMonitoringParams(t *testing.T) { @@ -548,11 +548,11 @@ deadband_value = 100.0 }, }}, }, - }, o.SubscribeClientConfig.Groups) + }, o.subscribeClientConfig.Groups) } func TestSubscribeClientConfigInvalidTrigger(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -581,12 +581,12 @@ func TestSubscribeClientConfigInvalidTrigger(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "trigger 'not_valid' not supported, node 'ns=3;i=1'") } func TestSubscribeClientConfigMissingTrigger(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -615,12 +615,12 @@ func TestSubscribeClientConfigMissingTrigger(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "trigger '' not supported, node 'ns=3;i=1'") } func TestSubscribeClientConfigInvalidDeadbandType(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -650,12 +650,12 @@ func TestSubscribeClientConfigInvalidDeadbandType(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "deadband_type 'not_valid' not supported, node 
'ns=3;i=1'") } func TestSubscribeClientConfigMissingDeadbandType(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -684,12 +684,12 @@ func TestSubscribeClientConfigMissingDeadbandType(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "deadband_type '' not supported, node 'ns=3;i=1'") } func TestSubscribeClientConfigInvalidDeadbandValue(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -721,12 +721,12 @@ func TestSubscribeClientConfigInvalidDeadbandValue(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "negative deadband_value not supported, node 'ns=3;i=1'") } func TestSubscribeClientConfigMissingDeadbandValue(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -756,12 +756,12 @@ func TestSubscribeClientConfigMissingDeadbandValue(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "deadband_value was not set, node 'ns=3;i=1'") } func TestSubscribeClientConfigValidMonitoringParams(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: 
input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -799,7 +799,7 @@ func TestSubscribeClientConfigValidMonitoringParams(t *testing.T) { }, }) - subClient, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + subClient, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.NoError(t, err) require.Equal(t, &ua.MonitoringParameters{ SamplingInterval: 50, diff --git a/plugins/inputs/opcua_listener/subscribe_client.go b/plugins/inputs/opcua_listener/subscribe_client.go index 320262bafbf60..1f70f006e7b6b 100644 --- a/plugins/inputs/opcua_listener/subscribe_client.go +++ b/plugins/inputs/opcua_listener/subscribe_client.go @@ -16,15 +16,15 @@ import ( "github.com/influxdata/telegraf/plugins/common/opcua/input" ) -type SubscribeClientConfig struct { +type subscribeClientConfig struct { input.InputClientConfig SubscriptionInterval config.Duration `toml:"subscription_interval"` ConnectFailBehavior string `toml:"connect_fail_behavior"` } -type SubscribeClient struct { +type subscribeClient struct { *input.OpcUAInputClient - Config SubscribeClientConfig + Config subscribeClientConfig sub *opcua.Subscription monitoredItemsReqs []*ua.MonitoredItemCreateRequest @@ -81,7 +81,7 @@ func assignConfigValuesToRequest(req *ua.MonitoredItemCreateRequest, monParams * return nil } -func (sc *SubscribeClientConfig) CreateSubscribeClient(log telegraf.Logger) (*SubscribeClient, error) { +func (sc *subscribeClientConfig) createSubscribeClient(log telegraf.Logger) (*subscribeClient, error) { client, err := sc.InputClientConfig.CreateInputClient(log) if err != nil { return nil, err @@ -92,7 +92,7 @@ func (sc *SubscribeClientConfig) CreateSubscribeClient(log telegraf.Logger) (*Su } processingCtx, processingCancel := context.WithCancel(context.Background()) - subClient := &SubscribeClient{ + subClient := &subscribeClient{ OpcUAInputClient: client, Config: *sc, monitoredItemsReqs: 
make([]*ua.MonitoredItemCreateRequest, len(client.NodeIDs)), @@ -118,7 +118,7 @@ func (sc *SubscribeClientConfig) CreateSubscribeClient(log telegraf.Logger) (*Su return subClient, nil } -func (o *SubscribeClient) Connect() error { +func (o *subscribeClient) connect() error { err := o.OpcUAClient.Connect(o.ctx) if err != nil { return err @@ -137,7 +137,7 @@ func (o *SubscribeClient) Connect() error { return nil } -func (o *SubscribeClient) Stop(ctx context.Context) <-chan struct{} { +func (o *subscribeClient) stop(ctx context.Context) <-chan struct{} { o.Log.Debugf("Stopping OPC subscription...") if o.State() != opcuaclient.Connected { return nil @@ -152,8 +152,8 @@ func (o *SubscribeClient) Stop(ctx context.Context) <-chan struct{} { return closing } -func (o *SubscribeClient) StartStreamValues(ctx context.Context) (<-chan telegraf.Metric, error) { - err := o.Connect() +func (o *subscribeClient) startStreamValues(ctx context.Context) (<-chan telegraf.Metric, error) { + err := o.connect() if err != nil { switch o.Config.ConnectFailBehavior { case "retry": @@ -191,7 +191,7 @@ func (o *SubscribeClient) StartStreamValues(ctx context.Context) (<-chan telegra return o.metrics, nil } -func (o *SubscribeClient) processReceivedNotifications() { +func (o *subscribeClient) processReceivedNotifications() { for { select { case <-o.ctx.Done(): diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index 81c63d13c4796..c89b623f9c802 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - ldap "github.com/go-ldap/ldap/v3" + "github.com/go-ldap/ldap/v3" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" @@ -17,56 +17,41 @@ import ( //go:embed sample.conf var sampleConfig string +var ( + searchBase = "cn=Monitor" + searchFilter = "(|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject))" + 
searchAttrs = []string{"monitorCounter", "monitorOpInitiated", "monitorOpCompleted", "monitoredInfo"} + attrTranslate = map[string]string{ + "monitorCounter": "", + "monitoredInfo": "", + "monitorOpInitiated": "_initiated", + "monitorOpCompleted": "_completed", + "olmMDBPagesMax": "_mdb_pages_max", + "olmMDBPagesUsed": "_mdb_pages_used", + "olmMDBPagesFree": "_mdb_pages_free", + "olmMDBReadersMax": "_mdb_readers_max", + "olmMDBReadersUsed": "_mdb_readers_used", + "olmMDBEntries": "_mdb_entries", + } +) + type Openldap struct { - Host string - Port int + Host string `toml:"host"` + Port int `toml:"port"` SSL string `toml:"ssl" deprecated:"1.7.0;1.35.0;use 'tls' instead"` TLS string `toml:"tls"` - InsecureSkipVerify bool + InsecureSkipVerify bool `toml:"insecure_skip_verify"` SSLCA string `toml:"ssl_ca" deprecated:"1.7.0;1.35.0;use 'tls_ca' instead"` TLSCA string `toml:"tls_ca"` - BindDn string - BindPassword string - ReverseMetricNames bool -} - -var searchBase = "cn=Monitor" -var searchFilter = "(|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject))" -var searchAttrs = []string{"monitorCounter", "monitorOpInitiated", "monitorOpCompleted", "monitoredInfo"} -var attrTranslate = map[string]string{ - "monitorCounter": "", - "monitoredInfo": "", - "monitorOpInitiated": "_initiated", - "monitorOpCompleted": "_completed", - "olmMDBPagesMax": "_mdb_pages_max", - "olmMDBPagesUsed": "_mdb_pages_used", - "olmMDBPagesFree": "_mdb_pages_free", - "olmMDBReadersMax": "_mdb_readers_max", - "olmMDBReadersUsed": "_mdb_readers_used", - "olmMDBEntries": "_mdb_entries", -} - -// return an initialized Openldap -func NewOpenldap() *Openldap { - return &Openldap{ - Host: "localhost", - Port: 389, - SSL: "", - TLS: "", - InsecureSkipVerify: false, - SSLCA: "", - TLSCA: "", - BindDn: "", - BindPassword: "", - ReverseMetricNames: false, - } + BindDn string `toml:"bind_dn"` + BindPassword string `toml:"bind_password"` + ReverseMetricNames bool 
`toml:"reverse_metric_names"` } func (*Openldap) SampleConfig() string { return sampleConfig } -// gather metrics func (o *Openldap) Gather(acc telegraf.Accumulator) error { if o.TLS == "" { o.TLS = o.SSL @@ -198,6 +183,21 @@ func dnToMetric(dn string, o *Openldap) string { return strings.ReplaceAll(metricName, ",", "") } +func newOpenldap() *Openldap { + return &Openldap{ + Host: "localhost", + Port: 389, + SSL: "", + TLS: "", + InsecureSkipVerify: false, + SSLCA: "", + TLSCA: "", + BindDn: "", + BindPassword: "", + ReverseMetricNames: false, + } +} + func init() { - inputs.Add("openldap", func() telegraf.Input { return NewOpenldap() }) + inputs.Add("openldap", func() telegraf.Input { return newOpenldap() }) } diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go index 9066c6f5dc268..9daf61673bbc9 100644 --- a/plugins/inputs/openntpd/openntpd.go +++ b/plugins/inputs/openntpd/openntpd.go @@ -20,60 +20,38 @@ import ( //go:embed sample.conf var sampleConfig string -// Mapping of the ntpctl tag key to the index in the command output -var tagI = map[string]int{ - "stratum": 2, -} - -// Mapping of float metrics to their index in the command output -var floatI = map[string]int{ - "offset": 5, - "delay": 6, - "jitter": 7, -} +var ( + defaultBinary = "/usr/sbin/ntpctl" + defaultTimeout = config.Duration(5 * time.Second) -// Mapping of int metrics to their index in the command output -var intI = map[string]int{ - "wt": 0, - "tl": 1, - "next": 3, - "poll": 4, -} - -type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) + // Mapping of the ntpctl tag key to the index in the command output + tagI = map[string]int{ + "stratum": 2, + } + // Mapping of float metrics to their index in the command output + floatI = map[string]int{ + "offset": 5, + "delay": 6, + "jitter": 7, + } + // Mapping of int metrics to their index in the command output + intI = map[string]int{ + "wt": 0, + "tl": 1, + "next": 3, + "poll": 
4, + } +) -// Openntpd is used to store configuration values type Openntpd struct { - Binary string - Timeout config.Duration - UseSudo bool + Binary string `toml:"binary"` + Timeout config.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` run runner } -var defaultBinary = "/usr/sbin/ntpctl" -var defaultTimeout = config.Duration(5 * time.Second) - -// Shell out to ntpctl and return the output -func openntpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { - cmdArgs := []string{"-s", "peers"} - - cmd := exec.Command(cmdName, cmdArgs...) - - if useSudo { - cmdArgs = append([]string{cmdName}, cmdArgs...) - cmd = exec.Command("sudo", cmdArgs...) - } - - var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Duration(timeout)) - if err != nil { - return &out, fmt.Errorf("error running ntpctl: %w", err) - } - - return &out, nil -} +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) func (*Openntpd) SampleConfig() string { return sampleConfig @@ -190,6 +168,27 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { return nil } +// Shell out to ntpctl and return the output +func openntpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { + cmdArgs := []string{"-s", "peers"} + + cmd := exec.Command(cmdName, cmdArgs...) + + if useSudo { + cmdArgs = append([]string{cmdName}, cmdArgs...) + cmd = exec.Command("sudo", cmdArgs...) 
+ } + + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return &out, fmt.Errorf("error running ntpctl: %w", err) + } + + return &out, nil +} + func init() { inputs.Add("openntpd", func() telegraf.Input { return &Openntpd{ diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index df3b7187b094f..0ea15d0aa5703 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { +func openntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBufferString(output), nil } @@ -19,7 +19,7 @@ func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buff func TestParseSimpleOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutput), + run: openntpdCTL(simpleOutput), } err := v.Gather(acc) @@ -50,7 +50,7 @@ func TestParseSimpleOutput(t *testing.T) { func TestParseSimpleOutputwithStatePrefix(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputwithStatePrefix), + run: openntpdCTL(simpleOutputwithStatePrefix), } err := v.Gather(acc) @@ -82,7 +82,7 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { func TestParseSimpleOutputInvalidPeer(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputInvalidPeer), + run: openntpdCTL(simpleOutputInvalidPeer), } err := v.Gather(acc) @@ -110,7 +110,7 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { func TestParseSimpleOutputServersDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServersDNSError), + run: 
openntpdCTL(simpleOutputServersDNSError), } err := v.Gather(acc) @@ -152,7 +152,7 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { func TestParseSimpleOutputServerDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServerDNSError), + run: openntpdCTL(simpleOutputServerDNSError), } err := v.Gather(acc) @@ -180,7 +180,7 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(fullOutput), + run: openntpdCTL(fullOutput), } err := v.Gather(acc) diff --git a/plugins/inputs/opensearch_query/aggregation.bucket.go b/plugins/inputs/opensearch_query/aggregation.bucket.go index 87669e5c7ad0f..fcde3cc27320f 100644 --- a/plugins/inputs/opensearch_query/aggregation.bucket.go +++ b/plugins/inputs/opensearch_query/aggregation.bucket.go @@ -5,9 +5,9 @@ import ( "fmt" ) -type BucketAggregationRequest map[string]*aggregationFunction +type bucketAggregationRequest map[string]*aggregationFunction -func (b BucketAggregationRequest) AddAggregation(name, aggType, field string) error { +func (b bucketAggregationRequest) addAggregation(name, aggType, field string) error { switch aggType { case "terms": default: @@ -22,11 +22,11 @@ func (b BucketAggregationRequest) AddAggregation(name, aggType, field string) er return nil } -func (b BucketAggregationRequest) AddNestedAggregation(name string, a AggregationRequest) { +func (b bucketAggregationRequest) addNestedAggregation(name string, a aggregationRequest) { b[name].nested = a } -func (b BucketAggregationRequest) BucketSize(name string, size int) error { +func (b bucketAggregationRequest) bucketSize(name string, size int) error { if size <= 0 { return errors.New("invalid size; must be integer value > 0") } @@ -35,11 +35,11 @@ func (b BucketAggregationRequest) BucketSize(name string, size int) error { return fmt.Errorf("aggregation %q not found", name) } - b[name].Size(size) 
+ b[name].setSize(size) return nil } -func (b BucketAggregationRequest) Missing(name, missing string) { - b[name].Missing(missing) +func (b bucketAggregationRequest) missing(name, missing string) { + b[name].setMissing(missing) } diff --git a/plugins/inputs/opensearch_query/aggregation.go b/plugins/inputs/opensearch_query/aggregation.go index 4dc8f7b070ec3..e4c8f68ad5875 100644 --- a/plugins/inputs/opensearch_query/aggregation.go +++ b/plugins/inputs/opensearch_query/aggregation.go @@ -4,14 +4,8 @@ import ( "encoding/json" ) -type AggregationRequest interface { - AddAggregation(string, string, string) error -} - -type NestedAggregation interface { - Nested(string, AggregationRequest) - Missing(string) - Size(int) +type aggregationRequest interface { + addAggregation(string, string, string) error } type aggregationFunction struct { @@ -20,7 +14,7 @@ type aggregationFunction struct { size int missing string - nested AggregationRequest + nested aggregationRequest } func (a *aggregationFunction) MarshalJSON() ([]byte, error) { @@ -45,11 +39,11 @@ func (a *aggregationFunction) MarshalJSON() ([]byte, error) { return json.Marshal(agg) } -func (a *aggregationFunction) Size(size int) { +func (a *aggregationFunction) setSize(size int) { a.size = size } -func (a *aggregationFunction) Missing(missing string) { +func (a *aggregationFunction) setMissing(missing string) { a.missing = missing } diff --git a/plugins/inputs/opensearch_query/aggregation.metric.go b/plugins/inputs/opensearch_query/aggregation.metric.go index d18296757af0e..084b9e2c3de0f 100644 --- a/plugins/inputs/opensearch_query/aggregation.metric.go +++ b/plugins/inputs/opensearch_query/aggregation.metric.go @@ -2,9 +2,9 @@ package opensearch_query import "fmt" -type MetricAggregationRequest map[string]*aggregationFunction +type metricAggregationRequest map[string]*aggregationFunction -func (m MetricAggregationRequest) AddAggregation(name, aggType, field string) error { +func (m metricAggregationRequest) 
addAggregation(name, aggType, field string) error { if t := getAggregationFunctionType(aggType); t != "metric" { return fmt.Errorf("aggregation function %q not supported", aggType) } diff --git a/plugins/inputs/opensearch_query/aggregation.response.go b/plugins/inputs/opensearch_query/aggregation.response.go index 54c5f173feb57..122a0cabb0407 100644 --- a/plugins/inputs/opensearch_query/aggregation.response.go +++ b/plugins/inputs/opensearch_query/aggregation.response.go @@ -36,7 +36,7 @@ type bucketData struct { subaggregation aggregation } -func (a *aggregationResponse) GetMetrics(acc telegraf.Accumulator, measurement string) error { +func (a *aggregationResponse) getMetrics(acc telegraf.Accumulator, measurement string) error { // Simple case (no aggregations) if a.Aggregations == nil { tags := make(map[string]string) @@ -47,20 +47,20 @@ func (a *aggregationResponse) GetMetrics(acc telegraf.Accumulator, measurement s return nil } - return a.Aggregations.GetMetrics(acc, measurement, a.Hits.TotalHits.Value, make(map[string]string)) + return a.Aggregations.getMetrics(acc, measurement, a.Hits.TotalHits.Value, make(map[string]string)) } -func (a *aggregation) GetMetrics(acc telegraf.Accumulator, measurement string, docCount int64, tags map[string]string) error { +func (a *aggregation) getMetrics(acc telegraf.Accumulator, measurement string, docCount int64, tags map[string]string) error { var err error fields := make(map[string]interface{}) for name, agg := range *a { - if agg.IsAggregation() { + if agg.isAggregation() { for _, bucket := range agg.buckets { tt := map[string]string{name: bucket.Key} for k, v := range tags { tt[k] = v } - err = bucket.subaggregation.GetMetrics(acc, measurement, bucket.DocumentCount, tt) + err = bucket.subaggregation.getMetrics(acc, measurement, bucket.DocumentCount, tt) if err != nil { return err } @@ -101,7 +101,7 @@ func (a *aggregateValue) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, &a.metrics) } -func (a 
*aggregateValue) IsAggregation() bool { +func (a *aggregateValue) isAggregation() bool { return !(a.buckets == nil) } diff --git a/plugins/inputs/opensearch_query/opensearch_query.go b/plugins/inputs/opensearch_query/opensearch_query.go index b9cefce59e025..833bbaab960c1 100644 --- a/plugins/inputs/opensearch_query/opensearch_query.go +++ b/plugins/inputs/opensearch_query/opensearch_query.go @@ -25,7 +25,6 @@ import ( //go:embed sample.conf var sampleConfig string -// OpensearchQuery struct type OpensearchQuery struct { URLs []string `toml:"urls"` Username config.Secret `toml:"username"` @@ -41,7 +40,6 @@ type OpensearchQuery struct { osClient *opensearch.Client } -// osAggregation struct type osAggregation struct { Index string `toml:"index"` MeasurementName string `toml:"measurement_name"` @@ -56,14 +54,13 @@ type osAggregation struct { MissingTagValue string `toml:"missing_tag_value"` mapMetricFields map[string]string - aggregation AggregationRequest + aggregation aggregationRequest } func (*OpensearchQuery) SampleConfig() string { return sampleConfig } -// Init the plugin. 
func (o *OpensearchQuery) Init() error { if o.URLs == nil { return errors.New("no urls defined") @@ -89,19 +86,21 @@ func (o *OpensearchQuery) Init() error { return nil } -func (o *OpensearchQuery) initAggregation(agg osAggregation, i int) (err error) { - for _, metricField := range agg.MetricFields { - if _, ok := agg.mapMetricFields[metricField]; !ok { - return fmt.Errorf("metric field %q not found on index %q", metricField, agg.Index) - } - } +func (o *OpensearchQuery) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup - err = agg.buildAggregationQuery() - if err != nil { - return fmt.Errorf("error building aggregation: %w", err) + for _, agg := range o.Aggregations { + wg.Add(1) + go func(agg osAggregation) { + defer wg.Done() + err := o.osAggregationQuery(acc, agg) + if err != nil { + acc.AddError(fmt.Errorf("opensearch query aggregation %q: %w ", agg.MeasurementName, err)) + } + }(agg) } - o.Aggregations[i] = agg + wg.Wait() return nil } @@ -136,22 +135,19 @@ func (o *OpensearchQuery) newClient() error { return err } -// Gather writes the results of the queries from OpenSearch to the Accumulator. 
-func (o *OpensearchQuery) Gather(acc telegraf.Accumulator) error { - var wg sync.WaitGroup +func (o *OpensearchQuery) initAggregation(agg osAggregation, i int) (err error) { + for _, metricField := range agg.MetricFields { + if _, ok := agg.mapMetricFields[metricField]; !ok { + return fmt.Errorf("metric field %q not found on index %q", metricField, agg.Index) + } + } - for _, agg := range o.Aggregations { - wg.Add(1) - go func(agg osAggregation) { - defer wg.Done() - err := o.osAggregationQuery(acc, agg) - if err != nil { - acc.AddError(fmt.Errorf("opensearch query aggregation %q: %w ", agg.MeasurementName, err)) - } - }(agg) + err = agg.buildAggregationQuery() + if err != nil { + return fmt.Errorf("error building aggregation: %w", err) } - wg.Wait() + o.Aggregations[i] = agg return nil } @@ -164,16 +160,7 @@ func (o *OpensearchQuery) osAggregationQuery(acc telegraf.Accumulator, aggregati return err } - return searchResult.GetMetrics(acc, aggregation.MeasurementName) -} - -func init() { - inputs.Add("opensearch_query", func() telegraf.Input { - return &OpensearchQuery{ - Timeout: config.Duration(time.Second * 5), - HealthCheckInterval: config.Duration(time.Second * 10), - } - }) + return searchResult.getMetrics(acc, aggregation.MeasurementName) } func (o *OpensearchQuery) runAggregationQuery(ctx context.Context, aggregation osAggregation) (*aggregationResponse, error) { @@ -184,13 +171,13 @@ func (o *OpensearchQuery) runAggregationQuery(ctx context.Context, aggregation o filterQuery = "*" } - aq := &Query{ + aq := &query{ Size: 0, Aggregations: aggregation.aggregation, Query: nil, } - boolQuery := &BoolQuery{ + boolQuery := &boolQuery{ FilterQueryString: filterQuery, TimestampField: aggregation.DateField, TimeRangeFrom: from, @@ -231,8 +218,8 @@ func (o *OpensearchQuery) runAggregationQuery(ctx context.Context, aggregation o } func (aggregation *osAggregation) buildAggregationQuery() error { - var agg AggregationRequest - agg = &MetricAggregationRequest{} + var 
agg aggregationRequest + agg = &metricAggregationRequest{} // create one aggregation per metric field found & function defined for numeric fields for k, v := range aggregation.mapMetricFields { @@ -242,7 +229,7 @@ func (aggregation *osAggregation) buildAggregationQuery() error { continue } - err := agg.AddAggregation(strings.ReplaceAll(k, ".", "_")+"_"+aggregation.MetricFunction, aggregation.MetricFunction, k) + err := agg.addAggregation(strings.ReplaceAll(k, ".", "_")+"_"+aggregation.MetricFunction, aggregation.MetricFunction, k) if err != nil { return err } @@ -250,21 +237,21 @@ func (aggregation *osAggregation) buildAggregationQuery() error { // create a terms aggregation per tag for _, term := range aggregation.Tags { - bucket := &BucketAggregationRequest{} + bucket := &bucketAggregationRequest{} name := strings.ReplaceAll(term, ".", "_") - err := bucket.AddAggregation(name, "terms", term) + err := bucket.addAggregation(name, "terms", term) if err != nil { return err } - err = bucket.BucketSize(name, 1000) + err = bucket.bucketSize(name, 1000) if err != nil { return err } if aggregation.IncludeMissingTag && aggregation.MissingTagValue != "" { - bucket.Missing(name, aggregation.MissingTagValue) + bucket.missing(name, aggregation.MissingTagValue) } - bucket.AddNestedAggregation(name, agg) + bucket.addNestedAggregation(name, agg) agg = bucket } @@ -273,3 +260,12 @@ func (aggregation *osAggregation) buildAggregationQuery() error { return nil } + +func init() { + inputs.Add("opensearch_query", func() telegraf.Input { + return &OpensearchQuery{ + Timeout: config.Duration(time.Second * 5), + HealthCheckInterval: config.Duration(time.Second * 10), + } + }) +} diff --git a/plugins/inputs/opensearch_query/opensearch_query_test.go b/plugins/inputs/opensearch_query/opensearch_query_test.go index cc8627a98d4a8..2a1aced1e5418 100644 --- a/plugins/inputs/opensearch_query/opensearch_query_test.go +++ b/plugins/inputs/opensearch_query/opensearch_query_test.go @@ -12,13 +12,14 
@@ import ( "time" "github.com/docker/go-connections/nat" + "github.com/opensearch-project/opensearch-go/v2/opensearchutil" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/opensearch-project/opensearch-go/v2/opensearchutil" - "github.com/stretchr/testify/require" - "github.com/testcontainers/testcontainers-go/wait" ) const ( @@ -674,18 +675,18 @@ func TestOpensearchQueryIntegration(t *testing.T) { } func TestMetricAggregationMarshal(t *testing.T) { - agg := &MetricAggregationRequest{} - err := agg.AddAggregation("sum_taxful_total_price", "sum", "taxful_total_price") + agg := &metricAggregationRequest{} + err := agg.addAggregation("sum_taxful_total_price", "sum", "taxful_total_price") require.NoError(t, err) _, err = json.Marshal(agg) require.NoError(t, err) - bucket := &BucketAggregationRequest{} - err = bucket.AddAggregation("terms_by_currency", "terms", "currency") + bucket := &bucketAggregationRequest{} + err = bucket.addAggregation("terms_by_currency", "terms", "currency") require.NoError(t, err) - bucket.AddNestedAggregation("terms_by_currency", agg) + bucket.addNestedAggregation("terms_by_currency", agg) _, err = json.Marshal(bucket) require.NoError(t, err) } diff --git a/plugins/inputs/opensearch_query/query.go b/plugins/inputs/opensearch_query/query.go index 1a12aa5e3c594..e06f518f6c22b 100644 --- a/plugins/inputs/opensearch_query/query.go +++ b/plugins/inputs/opensearch_query/query.go @@ -5,13 +5,13 @@ import ( "time" ) -type Query struct { +type query struct { Size int `json:"size"` - Aggregations AggregationRequest `json:"aggregations"` + Aggregations aggregationRequest `json:"aggregations"` Query interface{} `json:"query,omitempty"` } -type BoolQuery struct { +type boolQuery struct { FilterQueryString string TimestampField string 
TimeRangeFrom time.Time @@ -19,7 +19,7 @@ type BoolQuery struct { DateFieldFormat string } -func (b *BoolQuery) MarshalJSON() ([]byte, error) { +func (b *boolQuery) MarshalJSON() ([]byte, error) { // Construct range dateTimeRange := map[string]interface{}{ "from": b.TimeRangeFrom, diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index 89f0f822d8c42..e3511e9a1f59f 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -21,49 +21,29 @@ import ( //go:embed sample.conf var sampleConfig string -type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) +var ( + defaultBinary = "/usr/sbin/smtpctl" + defaultTimeout = config.Duration(time.Second) +) -// Opensmtpd is used to store configuration values type Opensmtpd struct { - Binary string - Timeout config.Duration - UseSudo bool + Binary string `toml:"binary"` + Timeout config.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` run runner } -var defaultBinary = "/usr/sbin/smtpctl" -var defaultTimeout = config.Duration(time.Second) - -// Shell out to opensmtpd_stat and return the output -func opensmtpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { - cmdArgs := []string{"show", "stats"} - - cmd := exec.Command(cmdName, cmdArgs...) - - if useSudo { - cmdArgs = append([]string{cmdName}, cmdArgs...) - cmd = exec.Command("sudo", cmdArgs...) - } - - var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Duration(timeout)) - if err != nil { - return &out, fmt.Errorf("error running smtpctl: %w", err) - } - - return &out, nil -} +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) -// Gather collects the configured stats from smtpctl and adds them to the -// Accumulator func (*Opensmtpd) SampleConfig() string { return sampleConfig } -// All the dots in stat name will replaced by underscores. 
Histogram statistics will not be collected. func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { + // All the dots in stat name will be replaced by underscores. + // Histogram statistics will not be collected. + // Always exclude uptime.human statistics statExcluded := []string{"uptime.human"} filterExcluded, err := filter.Compile(statExcluded) @@ -108,6 +88,27 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { return nil } +// Shell out to opensmtpd_stat and return the output +func opensmtpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { + cmdArgs := []string{"show", "stats"} + + cmd := exec.Command(cmdName, cmdArgs...) + + if useSudo { + cmdArgs = append([]string{cmdName}, cmdArgs...) + cmd = exec.Command("sudo", cmdArgs...) + } + + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return &out, fmt.Errorf("error running smtpctl: %w", err) + } + + return &out, nil +} + func init() { inputs.Add("opensmtpd", func() telegraf.Input { return &Opensmtpd{ diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index 599bf500895f9..5d856f68b761c 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { +func smtpCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBufferString(output), nil } @@ -19,7 +19,7 @@ func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Opensmtpd{ - run: SMTPCTL(fullOutput), + run: smtpCTL(fullOutput), } err := v.Gather(acc) diff --git 
a/plugins/inputs/openstack/openstack.go b/plugins/inputs/openstack/openstack.go index 4031ac2217711..881466a540e89 100644 --- a/plugins/inputs/openstack/openstack.go +++ b/plugins/inputs/openstack/openstack.go @@ -57,7 +57,6 @@ var ( typeStorage = regexp.MustCompile(`_errors$|_read$|_read_req$|_write$|_write_req$`) ) -// OpenStack is the main structure associated with a collection instance. type OpenStack struct { // Configuration variables IdentityEndpoint string `toml:"authentication_endpoint"` @@ -93,19 +92,10 @@ type OpenStack struct { services map[string]bool } -// convertTimeFormat, to convert time format based on HumanReadableTS -func (o *OpenStack) convertTimeFormat(t time.Time) interface{} { - if o.HumanReadableTS { - return t.Format("2006-01-02T15:04:05.999999999Z07:00") - } - return t.UnixNano() -} - func (*OpenStack) SampleConfig() string { return sampleConfig } -// initialize performs any necessary initialization functions func (o *OpenStack) Init() error { if len(o.EnabledServices) == 0 { o.EnabledServices = []string{"services", "projects", "hypervisors", "flavors", "networks", "volumes"} @@ -266,14 +256,6 @@ func (o *OpenStack) Start(telegraf.Accumulator) error { return nil } -func (o *OpenStack) Stop() { - if o.client != nil { - o.client.CloseIdleConnections() - } -} - -// Gather gathers resources from the OpenStack API and accumulates metrics. This -// implements the Input interface. 
func (o *OpenStack) Gather(acc telegraf.Accumulator) error { ctx := context.Background() callDuration := make(map[string]interface{}, len(o.services)) @@ -344,6 +326,12 @@ func (o *OpenStack) Gather(acc telegraf.Accumulator) error { return nil } +func (o *OpenStack) Stop() { + if o.client != nil { + o.client.CloseIdleConnections() + } +} + func (o *OpenStack) availableServicesFromAuth(provider *gophercloud.ProviderClient) (bool, error) { authResult := provider.GetAuthResult() if authResult == nil { @@ -1067,7 +1055,14 @@ func (o *OpenStack) gatherServerDiagnostics(ctx context.Context, acc telegraf.Ac return nil } -// init registers a callback which creates a new OpenStack input instance. +// convertTimeFormat, to convert time format based on HumanReadableTS +func (o *OpenStack) convertTimeFormat(t time.Time) interface{} { + if o.HumanReadableTS { + return t.Format("2006-01-02T15:04:05.999999999Z07:00") + } + return t.UnixNano() +} + func init() { inputs.Add("openstack", func() telegraf.Input { return &OpenStack{ diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go index 6b6bd5a695877..3323569935728 100644 --- a/plugins/inputs/opentelemetry/opentelemetry.go +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -46,10 +46,6 @@ func (*OpenTelemetry) SampleConfig() string { return sampleConfig } -func (*OpenTelemetry) Gather(telegraf.Accumulator) error { - return nil -} - func (o *OpenTelemetry) Init() error { if o.ServiceAddress == "" { o.ServiceAddress = "0.0.0.0:4317" @@ -123,6 +119,10 @@ func (o *OpenTelemetry) Start(acc telegraf.Accumulator) error { return nil } +func (*OpenTelemetry) Gather(telegraf.Accumulator) error { + return nil +} + func (o *OpenTelemetry) Stop() { if o.grpcServer != nil { o.grpcServer.Stop() diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index ae6b198f2da5b..751aaf1af3b12 100644 --- 
a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb-observability/otel2influx" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/sdk/metric" @@ -24,7 +25,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/protobuf/encoding/protojson" - "github.com/influxdata/influxdb-observability/otel2influx" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 6543b113abcb2..4eb354474d150 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -174,7 +174,7 @@ func (n *OpenWeatherMap) gatherWeather(acc telegraf.Accumulator, city string) er return fmt.Errorf("querying %q failed: %w", addr, err) } - var e WeatherEntry + var e weatherEntry if err := json.Unmarshal(buf, &e); err != nil { return fmt.Errorf("parsing JSON response failed: %w", err) } @@ -223,7 +223,7 @@ func (n *OpenWeatherMap) gatherWeatherBatch(acc telegraf.Accumulator, cities str return fmt.Errorf("querying %q failed: %w", addr, err) } - var status Status + var status status if err := json.Unmarshal(buf, &status); err != nil { return fmt.Errorf("parsing JSON response failed: %w", err) } @@ -274,7 +274,7 @@ func (n *OpenWeatherMap) gatherForecast(acc telegraf.Accumulator, city string) e return fmt.Errorf("querying %q failed: %w", addr, err) } - var status Status + var status status if err := json.Unmarshal(buf, &status); err != nil { return fmt.Errorf("parsing JSON response failed: %w", err) } diff --git a/plugins/inputs/openweathermap/types.go b/plugins/inputs/openweathermap/types.go index 
4920dd3f7acde..8fc170a472aa1 100644 --- a/plugins/inputs/openweathermap/types.go +++ b/plugins/inputs/openweathermap/types.go @@ -1,6 +1,6 @@ package openweathermap -type WeatherEntry struct { +type weatherEntry struct { Dt int64 `json:"dt"` Clouds struct { All int64 `json:"all"` @@ -43,21 +43,21 @@ type WeatherEntry struct { } `json:"weather"` } -func (e WeatherEntry) snow() float64 { +func (e weatherEntry) snow() float64 { if e.Snow.Snow1 > 0 { return e.Snow.Snow1 } return e.Snow.Snow3 } -func (e WeatherEntry) rain() float64 { +func (e weatherEntry) rain() float64 { if e.Rain.Rain1 > 0 { return e.Rain.Rain1 } return e.Rain.Rain3 } -type Status struct { +type status struct { City struct { Coord struct { Lat float64 `json:"lat"` @@ -67,5 +67,5 @@ type Status struct { ID int64 `json:"id"` Name string `json:"name"` } `json:"city"` - List []WeatherEntry `json:"list"` + List []weatherEntry `json:"list"` } From bec49c2ebe968655e0c6b25b2ba450271cfe9efe Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:36:47 +0100 Subject: [PATCH 052/170] chore(parsers.avro): Add unit-test for enum (#16260) --- plugins/parsers/avro/parser_test.go | 12 +++--- .../bad-timestamp-format/expected.err | 0 .../bad-timestamp-format/expected.out | 0 .../bad-timestamp-format/message.avro | 0 .../bad-timestamp-format/telegraf.conf | 4 +- .../benchmark/expected.out | 0 .../benchmark/message.json | 0 .../benchmark/telegraf.conf | 2 +- .../config-both/expected.err | 0 .../config-both/expected.out | 0 .../config-both/message.avro | 0 .../config-both/telegraf.conf | 4 +- .../config-neither/expected.err | 0 .../config-neither/expected.out | 0 .../config-neither/message.avro | 0 .../config-neither/telegraf.conf | 2 +- .../parsers/avro/testcases/enum/expected.out | 1 + .../parsers/avro/testcases/enum/message.json | 7 ++++ .../parsers/avro/testcases/enum/telegraf.conf | 41 +++++++++++++++++++ .../json-array/expected.out | 0 
.../json-array/message.json | 0 .../json-array/telegraf.conf | 2 +- .../json-format/expected.out | 0 .../json-format/message.json | 0 .../json-format/telegraf.conf | 2 +- .../expected.out | 0 .../message.avro | 0 .../telegraf.conf | 4 +- .../no-timestamp-format/expected.out | 0 .../no-timestamp-format/message.avro | 0 .../no-timestamp-format/telegraf.conf | 4 +- .../supplied_timestamp/expected.out | 0 .../supplied_timestamp/message.avro | 0 .../supplied_timestamp/telegraf.conf | 4 +- .../expected.out | 0 .../message.avro | 0 .../telegraf.conf | 4 +- .../expected.out | 0 .../message.avro | 0 .../telegraf.conf | 4 +- .../union-any/expected.out | 0 .../union-any/message.json | 0 .../union-any/telegraf.conf | 2 +- .../union-array/expected.out | 0 .../union-array/message.json | 0 .../union-array/telegraf.conf | 2 +- .../union-nullable/expected.out | 0 .../union-nullable/message.json | 0 .../union-nullable/telegraf.conf | 2 +- .../union/expected.out | 0 .../union/message.json | 0 .../union/telegraf.conf | 2 +- 52 files changed, 77 insertions(+), 28 deletions(-) rename plugins/parsers/avro/{testdata => testcases}/bad-timestamp-format/expected.err (100%) rename plugins/parsers/avro/{testdata => testcases}/bad-timestamp-format/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/bad-timestamp-format/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/bad-timestamp-format/telegraf.conf (87%) rename plugins/parsers/avro/{testdata => testcases}/benchmark/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/benchmark/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/benchmark/telegraf.conf (94%) rename plugins/parsers/avro/{testdata => testcases}/config-both/expected.err (100%) rename plugins/parsers/avro/{testdata => testcases}/config-both/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/config-both/message.avro (100%) rename plugins/parsers/avro/{testdata => 
testcases}/config-both/telegraf.conf (88%) rename plugins/parsers/avro/{testdata => testcases}/config-neither/expected.err (100%) rename plugins/parsers/avro/{testdata => testcases}/config-neither/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/config-neither/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/config-neither/telegraf.conf (64%) create mode 100644 plugins/parsers/avro/testcases/enum/expected.out create mode 100644 plugins/parsers/avro/testcases/enum/message.json create mode 100644 plugins/parsers/avro/testcases/enum/telegraf.conf rename plugins/parsers/avro/{testdata => testcases}/json-array/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/json-array/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/json-array/telegraf.conf (93%) rename plugins/parsers/avro/{testdata => testcases}/json-format/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/json-format/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/json-format/telegraf.conf (95%) rename plugins/parsers/avro/{testdata => testcases}/measurement_name_from_message/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/measurement_name_from_message/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/measurement_name_from_message/telegraf.conf (86%) rename plugins/parsers/avro/{testdata => testcases}/no-timestamp-format/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/no-timestamp-format/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/no-timestamp-format/telegraf.conf (86%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp/telegraf.conf (87%) rename plugins/parsers/avro/{testdata => 
testcases}/supplied_timestamp_fields_specified/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_specified/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_specified/telegraf.conf (85%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_unspecified/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_unspecified/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_unspecified/telegraf.conf (80%) rename plugins/parsers/avro/{testdata => testcases}/union-any/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/union-any/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/union-any/telegraf.conf (96%) rename plugins/parsers/avro/{testdata => testcases}/union-array/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/union-array/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/union-array/telegraf.conf (93%) rename plugins/parsers/avro/{testdata => testcases}/union-nullable/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/union-nullable/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/union-nullable/telegraf.conf (95%) rename plugins/parsers/avro/{testdata => testcases}/union/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/union/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/union/telegraf.conf (96%) diff --git a/plugins/parsers/avro/parser_test.go b/plugins/parsers/avro/parser_test.go index 6ab43f2bbc21a..43c44ac8ccbf9 100644 --- a/plugins/parsers/avro/parser_test.go +++ b/plugins/parsers/avro/parser_test.go @@ -17,8 +17,8 @@ import ( ) func TestCases(t *testing.T) { - // Get all directories in testdata - folders, err := os.ReadDir("testdata") + // Get all test-case directories + 
folders, err := os.ReadDir("testcases") require.NoError(t, err) // Make sure testdata contains data require.NotEmpty(t, folders) @@ -30,7 +30,7 @@ func TestCases(t *testing.T) { for _, f := range folders { fname := f.Name() - testdataPath := filepath.Join("testdata", fname) + testdataPath := filepath.Join("testcases", fname) configFilename := filepath.Join(testdataPath, "telegraf.conf") expectedFilename := filepath.Join(testdataPath, "expected.out") expectedErrorFilename := filepath.Join(testdataPath, "expected.err") @@ -110,7 +110,7 @@ func BenchmarkParsing(b *testing.B) { } require.NoError(b, plugin.Init()) - benchmarkData, err := os.ReadFile(filepath.Join("testdata", "benchmark", "message.json")) + benchmarkData, err := os.ReadFile(filepath.Join("testcases", "benchmark", "message.json")) require.NoError(b, err) b.ResetTimer() @@ -131,7 +131,7 @@ func TestBenchmarkDataBinary(t *testing.T) { } require.NoError(t, plugin.Init()) - benchmarkDir := filepath.Join("testdata", "benchmark") + benchmarkDir := filepath.Join("testcases", "benchmark") // Read the expected valued from file parser := &influx.Parser{} @@ -167,7 +167,7 @@ func BenchmarkParsingBinary(b *testing.B) { require.NoError(b, plugin.Init()) // Re-encode the benchmark data from JSON to binary format - jsonData, err := os.ReadFile(filepath.Join("testdata", "benchmark", "message.json")) + jsonData, err := os.ReadFile(filepath.Join("testcases", "benchmark", "message.json")) require.NoError(b, err) codec, err := goavro.NewCodec(benchmarkSchema) require.NoError(b, err) diff --git a/plugins/parsers/avro/testdata/bad-timestamp-format/expected.err b/plugins/parsers/avro/testcases/bad-timestamp-format/expected.err similarity index 100% rename from plugins/parsers/avro/testdata/bad-timestamp-format/expected.err rename to plugins/parsers/avro/testcases/bad-timestamp-format/expected.err diff --git a/plugins/parsers/avro/testdata/bad-timestamp-format/expected.out 
b/plugins/parsers/avro/testcases/bad-timestamp-format/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/bad-timestamp-format/expected.out rename to plugins/parsers/avro/testcases/bad-timestamp-format/expected.out diff --git a/plugins/parsers/avro/testdata/bad-timestamp-format/message.avro b/plugins/parsers/avro/testcases/bad-timestamp-format/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/bad-timestamp-format/message.avro rename to plugins/parsers/avro/testcases/bad-timestamp-format/message.avro diff --git a/plugins/parsers/avro/testdata/bad-timestamp-format/telegraf.conf b/plugins/parsers/avro/testcases/bad-timestamp-format/telegraf.conf similarity index 87% rename from plugins/parsers/avro/testdata/bad-timestamp-format/telegraf.conf rename to plugins/parsers/avro/testcases/bad-timestamp-format/telegraf.conf index b4a89fad7095b..07297864fd38e 100644 --- a/plugins/parsers/avro/testdata/bad-timestamp-format/telegraf.conf +++ b/plugins/parsers/avro/testcases/bad-timestamp-format/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/bad-timestamp-format/message.avro"] + files = ["./testcases/bad-timestamp-format/message.avro"] data_format = "avro" avro_measurement = "measurement" @@ -26,4 +26,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/benchmark/expected.out b/plugins/parsers/avro/testcases/benchmark/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/benchmark/expected.out rename to plugins/parsers/avro/testcases/benchmark/expected.out diff --git a/plugins/parsers/avro/testdata/benchmark/message.json b/plugins/parsers/avro/testcases/benchmark/message.json similarity index 100% rename from plugins/parsers/avro/testdata/benchmark/message.json rename to plugins/parsers/avro/testcases/benchmark/message.json diff --git a/plugins/parsers/avro/testdata/benchmark/telegraf.conf b/plugins/parsers/avro/testcases/benchmark/telegraf.conf similarity index 94% 
rename from plugins/parsers/avro/testdata/benchmark/telegraf.conf rename to plugins/parsers/avro/testcases/benchmark/telegraf.conf index c20f7ccc753c9..bc67e9a315dde 100644 --- a/plugins/parsers/avro/testdata/benchmark/telegraf.conf +++ b/plugins/parsers/avro/testcases/benchmark/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/benchmark/message.json"] + files = ["./testcases/benchmark/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/config-both/expected.err b/plugins/parsers/avro/testcases/config-both/expected.err similarity index 100% rename from plugins/parsers/avro/testdata/config-both/expected.err rename to plugins/parsers/avro/testcases/config-both/expected.err diff --git a/plugins/parsers/avro/testdata/config-both/expected.out b/plugins/parsers/avro/testcases/config-both/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/config-both/expected.out rename to plugins/parsers/avro/testcases/config-both/expected.out diff --git a/plugins/parsers/avro/testdata/config-both/message.avro b/plugins/parsers/avro/testcases/config-both/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/config-both/message.avro rename to plugins/parsers/avro/testcases/config-both/message.avro diff --git a/plugins/parsers/avro/testdata/config-both/telegraf.conf b/plugins/parsers/avro/testcases/config-both/telegraf.conf similarity index 88% rename from plugins/parsers/avro/testdata/config-both/telegraf.conf rename to plugins/parsers/avro/testcases/config-both/telegraf.conf index 61cba90f22369..fb6bb5eb2dbb1 100644 --- a/plugins/parsers/avro/testdata/config-both/telegraf.conf +++ b/plugins/parsers/avro/testcases/config-both/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/config-both/message.avro"] + files = ["./testcases/config-both/message.avro"] data_format = "avro" avro_measurement = "measurement" @@ -25,4 +25,4 @@ } ] } -''' +''' diff --git 
a/plugins/parsers/avro/testdata/config-neither/expected.err b/plugins/parsers/avro/testcases/config-neither/expected.err similarity index 100% rename from plugins/parsers/avro/testdata/config-neither/expected.err rename to plugins/parsers/avro/testcases/config-neither/expected.err diff --git a/plugins/parsers/avro/testdata/config-neither/expected.out b/plugins/parsers/avro/testcases/config-neither/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/config-neither/expected.out rename to plugins/parsers/avro/testcases/config-neither/expected.out diff --git a/plugins/parsers/avro/testdata/config-neither/message.avro b/plugins/parsers/avro/testcases/config-neither/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/config-neither/message.avro rename to plugins/parsers/avro/testcases/config-neither/message.avro diff --git a/plugins/parsers/avro/testdata/config-neither/telegraf.conf b/plugins/parsers/avro/testcases/config-neither/telegraf.conf similarity index 64% rename from plugins/parsers/avro/testdata/config-neither/telegraf.conf rename to plugins/parsers/avro/testcases/config-neither/telegraf.conf index e52128df66d46..14adc8197ac2a 100644 --- a/plugins/parsers/avro/testdata/config-neither/telegraf.conf +++ b/plugins/parsers/avro/testcases/config-neither/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/config-neither/message.avro"] + files = ["./testcases/config-neither/message.avro"] data_format = "avro" avro_measurement = "measurement" avro_tags = [ "tag" ] diff --git a/plugins/parsers/avro/testcases/enum/expected.out b/plugins/parsers/avro/testcases/enum/expected.out new file mode 100644 index 0000000000000..15565a38cfdcf --- /dev/null +++ b/plugins/parsers/avro/testcases/enum/expected.out @@ -0,0 +1 @@ +sensors,name=temperature value_int=42i,status="OK" diff --git a/plugins/parsers/avro/testcases/enum/message.json b/plugins/parsers/avro/testcases/enum/message.json new file mode 100644 index 
0000000000000..6d4f89a71540d --- /dev/null +++ b/plugins/parsers/avro/testcases/enum/message.json @@ -0,0 +1,7 @@ +{ + "name": "temperature", + "value": { + "int": 42 + }, + "status": "OK" +} \ No newline at end of file diff --git a/plugins/parsers/avro/testcases/enum/telegraf.conf b/plugins/parsers/avro/testcases/enum/telegraf.conf new file mode 100644 index 0000000000000..bcaa95c383d67 --- /dev/null +++ b/plugins/parsers/avro/testcases/enum/telegraf.conf @@ -0,0 +1,41 @@ +[[ inputs.file ]] + files = ["./testcases/enum/message.json"] + data_format = "avro" + + avro_format = "json" + avro_measurement = "sensors" + avro_tags = ["name"] + avro_fields = ["value", "status"] + avro_field_separator = "_" + avro_schema = ''' + { + "type": "record", + "name": "Metric", + "fields": [ + { + "name": "name", + "type": "string" + }, + { + "name": "value", + "type": [ + "null", + "int", + "string" + ] + }, + { + "name": "status", + "type": { + "type": "enum", + "name": "Status", + "symbols": [ + "UNKNOWN", + "OK", + "FAILURE" + ] + } + } + ] + } + ''' diff --git a/plugins/parsers/avro/testdata/json-array/expected.out b/plugins/parsers/avro/testcases/json-array/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/json-array/expected.out rename to plugins/parsers/avro/testcases/json-array/expected.out diff --git a/plugins/parsers/avro/testdata/json-array/message.json b/plugins/parsers/avro/testcases/json-array/message.json similarity index 100% rename from plugins/parsers/avro/testdata/json-array/message.json rename to plugins/parsers/avro/testcases/json-array/message.json diff --git a/plugins/parsers/avro/testdata/json-array/telegraf.conf b/plugins/parsers/avro/testcases/json-array/telegraf.conf similarity index 93% rename from plugins/parsers/avro/testdata/json-array/telegraf.conf rename to plugins/parsers/avro/testcases/json-array/telegraf.conf index 1133f3849f343..a7031ef8c1616 100644 --- a/plugins/parsers/avro/testdata/json-array/telegraf.conf +++ 
b/plugins/parsers/avro/testcases/json-array/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/json-array/message.json"] + files = ["./testcases/json-array/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/json-format/expected.out b/plugins/parsers/avro/testcases/json-format/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/json-format/expected.out rename to plugins/parsers/avro/testcases/json-format/expected.out diff --git a/plugins/parsers/avro/testdata/json-format/message.json b/plugins/parsers/avro/testcases/json-format/message.json similarity index 100% rename from plugins/parsers/avro/testdata/json-format/message.json rename to plugins/parsers/avro/testcases/json-format/message.json diff --git a/plugins/parsers/avro/testdata/json-format/telegraf.conf b/plugins/parsers/avro/testcases/json-format/telegraf.conf similarity index 95% rename from plugins/parsers/avro/testdata/json-format/telegraf.conf rename to plugins/parsers/avro/testcases/json-format/telegraf.conf index e2238f673f020..0eea8f8fc66df 100644 --- a/plugins/parsers/avro/testdata/json-format/telegraf.conf +++ b/plugins/parsers/avro/testcases/json-format/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/json-format/message.json"] + files = ["./testcases/json-format/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/measurement_name_from_message/expected.out b/plugins/parsers/avro/testcases/measurement_name_from_message/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/measurement_name_from_message/expected.out rename to plugins/parsers/avro/testcases/measurement_name_from_message/expected.out diff --git a/plugins/parsers/avro/testdata/measurement_name_from_message/message.avro b/plugins/parsers/avro/testcases/measurement_name_from_message/message.avro similarity index 100% rename from 
plugins/parsers/avro/testdata/measurement_name_from_message/message.avro rename to plugins/parsers/avro/testcases/measurement_name_from_message/message.avro diff --git a/plugins/parsers/avro/testdata/measurement_name_from_message/telegraf.conf b/plugins/parsers/avro/testcases/measurement_name_from_message/telegraf.conf similarity index 86% rename from plugins/parsers/avro/testdata/measurement_name_from_message/telegraf.conf rename to plugins/parsers/avro/testcases/measurement_name_from_message/telegraf.conf index 4b7083ff8281d..97ece814c6ea3 100644 --- a/plugins/parsers/avro/testdata/measurement_name_from_message/telegraf.conf +++ b/plugins/parsers/avro/testcases/measurement_name_from_message/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/measurement_name_from_message/message.avro"] + files = ["./testcases/measurement_name_from_message/message.avro"] data_format = "avro" avro_measurement_field = "Measurement" avro_tags = [ "Server" ] @@ -27,4 +27,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/no-timestamp-format/expected.out b/plugins/parsers/avro/testcases/no-timestamp-format/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/no-timestamp-format/expected.out rename to plugins/parsers/avro/testcases/no-timestamp-format/expected.out diff --git a/plugins/parsers/avro/testdata/no-timestamp-format/message.avro b/plugins/parsers/avro/testcases/no-timestamp-format/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/no-timestamp-format/message.avro rename to plugins/parsers/avro/testcases/no-timestamp-format/message.avro diff --git a/plugins/parsers/avro/testdata/no-timestamp-format/telegraf.conf b/plugins/parsers/avro/testcases/no-timestamp-format/telegraf.conf similarity index 86% rename from plugins/parsers/avro/testdata/no-timestamp-format/telegraf.conf rename to plugins/parsers/avro/testcases/no-timestamp-format/telegraf.conf index a5d21090fa78d..c2f5d685857f3 100644 --- 
a/plugins/parsers/avro/testdata/no-timestamp-format/telegraf.conf +++ b/plugins/parsers/avro/testcases/no-timestamp-format/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/no-timestamp-format/message.avro"] + files = ["./testcases/no-timestamp-format/message.avro"] data_format = "avro" avro_measurement = "measurement" @@ -25,4 +25,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/supplied_timestamp/expected.out b/plugins/parsers/avro/testcases/supplied_timestamp/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp/expected.out rename to plugins/parsers/avro/testcases/supplied_timestamp/expected.out diff --git a/plugins/parsers/avro/testdata/supplied_timestamp/message.avro b/plugins/parsers/avro/testcases/supplied_timestamp/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp/message.avro rename to plugins/parsers/avro/testcases/supplied_timestamp/message.avro diff --git a/plugins/parsers/avro/testdata/supplied_timestamp/telegraf.conf b/plugins/parsers/avro/testcases/supplied_timestamp/telegraf.conf similarity index 87% rename from plugins/parsers/avro/testdata/supplied_timestamp/telegraf.conf rename to plugins/parsers/avro/testcases/supplied_timestamp/telegraf.conf index ee711eae3cd0e..f3eef5f38a7e1 100644 --- a/plugins/parsers/avro/testdata/supplied_timestamp/telegraf.conf +++ b/plugins/parsers/avro/testcases/supplied_timestamp/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/supplied_timestamp/message.avro"] + files = ["./testcases/supplied_timestamp/message.avro"] data_format = "avro" avro_measurement = "measurement" avro_tags = [ "tag" ] @@ -25,4 +25,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/expected.out b/plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/expected.out similarity index 100% rename from 
plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/expected.out rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/expected.out diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/message.avro b/plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/message.avro rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/message.avro diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/telegraf.conf b/plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/telegraf.conf similarity index 85% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/telegraf.conf rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/telegraf.conf index 9ae72b5308cb2..79f29b5a4b802 100644 --- a/plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/telegraf.conf +++ b/plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/supplied_timestamp_fields_specified/message.avro"] + files = ["./testcases/supplied_timestamp_fields_specified/message.avro"] data_format = "avro" avro_measurement = "measurement" avro_tags = [ "tag" ] @@ -26,4 +26,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/expected.out b/plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/expected.out rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/expected.out diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/message.avro b/plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/message.avro 
similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/message.avro rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/message.avro diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/telegraf.conf b/plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/telegraf.conf similarity index 80% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/telegraf.conf rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/telegraf.conf index d5c58355ebd92..925573f4f4fbe 100644 --- a/plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/telegraf.conf +++ b/plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/supplied_timestamp_fields_unspecified/message.avro"] + files = ["./testcases/supplied_timestamp_fields_unspecified/message.avro"] data_format = "avro" avro_measurement = "measurement" avro_tags = [ "tag" ] @@ -20,4 +20,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/union-any/expected.out b/plugins/parsers/avro/testcases/union-any/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/union-any/expected.out rename to plugins/parsers/avro/testcases/union-any/expected.out diff --git a/plugins/parsers/avro/testdata/union-any/message.json b/plugins/parsers/avro/testcases/union-any/message.json similarity index 100% rename from plugins/parsers/avro/testdata/union-any/message.json rename to plugins/parsers/avro/testcases/union-any/message.json diff --git a/plugins/parsers/avro/testdata/union-any/telegraf.conf b/plugins/parsers/avro/testcases/union-any/telegraf.conf similarity index 96% rename from plugins/parsers/avro/testdata/union-any/telegraf.conf rename to plugins/parsers/avro/testcases/union-any/telegraf.conf index e4966aee82c6b..1cea915bafe78 100644 --- 
a/plugins/parsers/avro/testdata/union-any/telegraf.conf +++ b/plugins/parsers/avro/testcases/union-any/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/union-any/message.json"] + files = ["./testcases/union-any/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/union-array/expected.out b/plugins/parsers/avro/testcases/union-array/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/union-array/expected.out rename to plugins/parsers/avro/testcases/union-array/expected.out diff --git a/plugins/parsers/avro/testdata/union-array/message.json b/plugins/parsers/avro/testcases/union-array/message.json similarity index 100% rename from plugins/parsers/avro/testdata/union-array/message.json rename to plugins/parsers/avro/testcases/union-array/message.json diff --git a/plugins/parsers/avro/testdata/union-array/telegraf.conf b/plugins/parsers/avro/testcases/union-array/telegraf.conf similarity index 93% rename from plugins/parsers/avro/testdata/union-array/telegraf.conf rename to plugins/parsers/avro/testcases/union-array/telegraf.conf index 75ef5cb40de20..f0aa5546f735c 100644 --- a/plugins/parsers/avro/testdata/union-array/telegraf.conf +++ b/plugins/parsers/avro/testcases/union-array/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/union-array/message.json"] + files = ["./testcases/union-array/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/union-nullable/expected.out b/plugins/parsers/avro/testcases/union-nullable/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/union-nullable/expected.out rename to plugins/parsers/avro/testcases/union-nullable/expected.out diff --git a/plugins/parsers/avro/testdata/union-nullable/message.json b/plugins/parsers/avro/testcases/union-nullable/message.json similarity index 100% rename from plugins/parsers/avro/testdata/union-nullable/message.json 
rename to plugins/parsers/avro/testcases/union-nullable/message.json diff --git a/plugins/parsers/avro/testdata/union-nullable/telegraf.conf b/plugins/parsers/avro/testcases/union-nullable/telegraf.conf similarity index 95% rename from plugins/parsers/avro/testdata/union-nullable/telegraf.conf rename to plugins/parsers/avro/testcases/union-nullable/telegraf.conf index a03a7e5dc8c59..790e8d676e5b4 100644 --- a/plugins/parsers/avro/testdata/union-nullable/telegraf.conf +++ b/plugins/parsers/avro/testcases/union-nullable/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/union-nullable/message.json"] + files = ["./testcases/union-nullable/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/union/expected.out b/plugins/parsers/avro/testcases/union/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/union/expected.out rename to plugins/parsers/avro/testcases/union/expected.out diff --git a/plugins/parsers/avro/testdata/union/message.json b/plugins/parsers/avro/testcases/union/message.json similarity index 100% rename from plugins/parsers/avro/testdata/union/message.json rename to plugins/parsers/avro/testcases/union/message.json diff --git a/plugins/parsers/avro/testdata/union/telegraf.conf b/plugins/parsers/avro/testcases/union/telegraf.conf similarity index 96% rename from plugins/parsers/avro/testdata/union/telegraf.conf rename to plugins/parsers/avro/testcases/union/telegraf.conf index dad3fb0a2045c..9783ec4f6371e 100644 --- a/plugins/parsers/avro/testdata/union/telegraf.conf +++ b/plugins/parsers/avro/testcases/union/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/union/message.json"] + files = ["./testcases/union/message.json"] data_format = "avro" avro_format = "json" From 304ab2e7807727c39b16563e77697d24ad46581f Mon Sep 17 00:00:00 2001 From: Alex Gokhale Date: Thu, 5 Dec 2024 16:38:32 +0000 Subject: [PATCH 053/170] feat(logging): Allow overriding 
message key for structured logging (#16242) --- cmd/telegraf/agent.conf | 4 ++++ cmd/telegraf/telegraf.go | 19 ++++++++-------- config/config.go | 4 ++++ docs/CONFIGURATION.md | 4 ++++ logger/logger.go | 2 ++ logger/structured_logger.go | 36 +++++++++++++++++++++--------- logger/structured_logger_test.go | 38 ++++++++++++++++++++++++++++++++ 7 files changed, 87 insertions(+), 20 deletions(-) diff --git a/cmd/telegraf/agent.conf b/cmd/telegraf/agent.conf index 12fd81ac4008c..dc2961c94bac0 100644 --- a/cmd/telegraf/agent.conf +++ b/cmd/telegraf/agent.conf @@ -57,6 +57,10 @@ ## "structured" or, on Windows, "eventlog". # logformat = "text" + ## Message key for structured logs, to override the default of "msg". + ## Ignored if `logformat` is not "structured". + # structured_log_message_key = "message" + ## Name of the file to be logged to or stderr if unset or empty. This ## setting is ignored for the "eventlog" format. # logfile = "" diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index de886c48eed7d..4fad778933f45 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -364,15 +364,16 @@ func (t *Telegraf) runAgent(ctx context.Context, reloadConfig bool) error { // Setup logging as configured. 
logConfig := &logger.Config{ - Debug: c.Agent.Debug || t.debug, - Quiet: c.Agent.Quiet || t.quiet, - LogTarget: c.Agent.LogTarget, - LogFormat: c.Agent.LogFormat, - Logfile: c.Agent.Logfile, - RotationInterval: time.Duration(c.Agent.LogfileRotationInterval), - RotationMaxSize: int64(c.Agent.LogfileRotationMaxSize), - RotationMaxArchives: c.Agent.LogfileRotationMaxArchives, - LogWithTimezone: c.Agent.LogWithTimezone, + Debug: c.Agent.Debug || t.debug, + Quiet: c.Agent.Quiet || t.quiet, + LogTarget: c.Agent.LogTarget, + LogFormat: c.Agent.LogFormat, + Logfile: c.Agent.Logfile, + StructuredLogMessageKey: c.Agent.StructuredLogMessageKey, + RotationInterval: time.Duration(c.Agent.LogfileRotationInterval), + RotationMaxSize: int64(c.Agent.LogfileRotationMaxSize), + RotationMaxArchives: c.Agent.LogfileRotationMaxArchives, + LogWithTimezone: c.Agent.LogWithTimezone, } if err := logger.SetupLogging(logConfig); err != nil { diff --git a/config/config.go b/config/config.go index 3ae2025313b4c..6a71646b095da 100644 --- a/config/config.go +++ b/config/config.go @@ -236,6 +236,10 @@ type AgentConfig struct { // Name of the file to be logged to or stderr if empty. Ignored for "eventlog" format. Logfile string `toml:"logfile"` + // Message key for structured logs, to override the default of "msg". + // Ignored if "logformat" is not "structured". + StructuredLogMessageKey string `toml:"structured_log_message_key"` + // The file will be rotated after the time interval specified. When set // to 0 no time based rotation is performed. LogfileRotationInterval Duration `toml:"logfile_rotation_interval"` diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index bab126fa06f9c..f104c06049e32 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -307,6 +307,10 @@ The agent table configures Telegraf and the defaults used across all plugins. "structured" or, on Windows, "eventlog". The output file (if any) is determined by the `logfile` setting. 
+- **structured_log_message_key**: + Message key for structured logs, to override the default of "msg". + Ignored if `logformat` is not "structured". + - **logfile**: Name of the file to be logged to or stderr if unset or empty. This setting is ignored for the "eventlog" format. diff --git a/logger/logger.go b/logger/logger.go index c344838c0b667..ab5a2ff2fb4c9 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -195,6 +195,8 @@ type Config struct { LogWithTimezone string // Logger instance name InstanceName string + // Structured logging message key + StructuredLogMessageKey string // internal log-level logLevel telegraf.LogLevel diff --git a/logger/structured_logger.go b/logger/structured_logger.go index dfe7dc2756f5e..5a4ed86a40e93 100644 --- a/logger/structured_logger.go +++ b/logger/structured_logger.go @@ -41,17 +41,19 @@ func (l *structuredLogger) Print(level telegraf.LogLevel, ts time.Time, _ string } } -var defaultStructuredHandlerOptions = &slog.HandlerOptions{ - Level: slog.Level(-99), - ReplaceAttr: func(_ []string, attr slog.Attr) slog.Attr { - // Translate the Telegraf log-levels to strings - if attr.Key == slog.LevelKey { - if level, ok := attr.Value.Any().(slog.Level); ok { - attr.Value = slog.StringValue(telegraf.LogLevel(level).String()) - } +var defaultReplaceAttr = func(_ []string, attr slog.Attr) slog.Attr { + // Translate the Telegraf log-levels to strings + if attr.Key == slog.LevelKey { + if level, ok := attr.Value.Any().(slog.Level); ok { + attr.Value = slog.StringValue(telegraf.LogLevel(level).String()) } - return attr - }, + } + return attr +} + +var defaultStructuredHandlerOptions = &slog.HandlerOptions{ + Level: slog.Level(-99), + ReplaceAttr: defaultReplaceAttr, } func init() { @@ -70,8 +72,20 @@ func init() { writer = w } + structuredHandlerOptions := defaultStructuredHandlerOptions + + if cfg.StructuredLogMessageKey != "" { + structuredHandlerOptions.ReplaceAttr = func(groups []string, attr slog.Attr) slog.Attr { + if attr.Key == 
slog.MessageKey { + attr.Key = cfg.StructuredLogMessageKey + } + + return defaultReplaceAttr(groups, attr) + } + } + return &structuredLogger{ - handler: slog.NewJSONHandler(writer, defaultStructuredHandlerOptions), + handler: slog.NewJSONHandler(writer, structuredHandlerOptions), output: writer, errlog: log.New(os.Stderr, "", 0), }, nil diff --git a/logger/structured_logger_test.go b/logger/structured_logger_test.go index 89208563a225b..1721bc48f5bcf 100644 --- a/logger/structured_logger_test.go +++ b/logger/structured_logger_test.go @@ -307,6 +307,44 @@ func TestStructuredWriteToFileInRotation(t *testing.T) { require.Len(t, files, 2) } +func TestStructuredLogMessageKey(t *testing.T) { + instance = defaultHandler() + + tmpfile, err := os.CreateTemp("", "") + require.NoError(t, err) + defer os.Remove(tmpfile.Name()) + + cfg := &Config{ + Logfile: tmpfile.Name(), + LogFormat: "structured", + RotationMaxArchives: -1, + Debug: true, + StructuredLogMessageKey: "message", + } + require.NoError(t, SetupLogging(cfg)) + + l := New("testing", "test", "") + l.Info("TEST") + + buf, err := os.ReadFile(tmpfile.Name()) + require.NoError(t, err) + + expected := map[string]interface{}{ + "level": "INFO", + "message": "TEST", + "category": "testing", + "plugin": "test", + } + + var actual map[string]interface{} + require.NoError(t, json.Unmarshal(buf, &actual)) + + require.Contains(t, actual, "time") + require.NotEmpty(t, actual["time"]) + delete(actual, "time") + require.Equal(t, expected, actual) +} + func BenchmarkTelegrafStructuredLogWrite(b *testing.B) { // Discard all logging output l := &structuredLogger{ From be2d5efed1f3a2b76fed2537050c1eb47c3e9369 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 6 Dec 2024 11:23:35 +0100 Subject: [PATCH 054/170] feat(outputs): Add rate-limiting infrastructure (#16258) --- internal/errors.go | 6 +- models/running_output_test.go | 227 ++++++++++- plugins/common/ratelimiter/config.go | 19 + 
plugins/common/ratelimiter/limiters.go | 66 ++++ plugins/common/ratelimiter/limiters_test.go | 176 +++++++++ plugins/common/ratelimiter/serializers.go | 100 +++++ .../common/ratelimiter/serializers_test.go | 351 ++++++++++++++++++ 7 files changed, 924 insertions(+), 21 deletions(-) create mode 100644 plugins/common/ratelimiter/config.go create mode 100644 plugins/common/ratelimiter/limiters.go create mode 100644 plugins/common/ratelimiter/limiters_test.go create mode 100644 plugins/common/ratelimiter/serializers.go create mode 100644 plugins/common/ratelimiter/serializers_test.go diff --git a/internal/errors.go b/internal/errors.go index d1e098ea441ce..a36bda794932c 100644 --- a/internal/errors.go +++ b/internal/errors.go @@ -2,7 +2,11 @@ package internal import "errors" -var ErrNotConnected = errors.New("not connected") +var ( + ErrNotConnected = errors.New("not connected") + ErrSerialization = errors.New("serialization of metric(s) failed") + ErrSizeLimitReached = errors.New("size limit reached") +) // StartupError indicates an error that occurred during startup of a plugin // e.g. due to connectivity issues or resources being not yet available. 
diff --git a/models/running_output_test.go b/models/running_output_test.go index 3c8b9e5951e1a..9a60481d52fa6 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -245,7 +245,7 @@ func TestRunningOutputWriteFail(t *testing.T) { Filter: Filter{}, } - m := &mockOutput{failWrite: true} + m := &mockOutput{batchAcceptSize: -1} ro := NewRunningOutput(m, conf, 4, 12) // Fill buffer to limit twice @@ -264,7 +264,7 @@ func TestRunningOutputWriteFail(t *testing.T) { // no successful flush yet require.Empty(t, m.Metrics()) - m.failWrite = false + m.batchAcceptSize = 0 err = ro.Write() require.NoError(t, err) @@ -277,7 +277,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { Filter: Filter{}, } - m := &mockOutput{failWrite: true} + m := &mockOutput{batchAcceptSize: -1} ro := NewRunningOutput(m, conf, 100, 1000) // add 5 metrics @@ -293,7 +293,8 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { // no successful flush yet require.Empty(t, m.Metrics()) - m.failWrite = false + m.batchAcceptSize = 0 + // add 5 more metrics for _, metric := range next5 { ro.AddMetric(metric) @@ -314,7 +315,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { Filter: Filter{}, } - m := &mockOutput{failWrite: true} + m := &mockOutput{batchAcceptSize: -1} ro := NewRunningOutput(m, conf, 5, 100) // add 5 metrics @@ -357,7 +358,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { // no successful flush yet require.Empty(t, m.Metrics()) - m.failWrite = false + m.batchAcceptSize = 0 err = ro.Write() require.NoError(t, err) @@ -377,7 +378,7 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { Filter: Filter{}, } - m := &mockOutput{failWrite: true} + m := &mockOutput{batchAcceptSize: -1} ro := NewRunningOutput(m, conf, 5, 1000) // add 5 metrics @@ -399,7 +400,8 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { require.Error(t, err) // unset fail and write metrics - m.failWrite = false + m.batchAcceptSize = 0 + err = ro.Write() require.NoError(t, 
err) @@ -620,7 +622,7 @@ func TestRunningOutputNonRetryableStartupBehaviorDefault(t *testing.T) { } } -func TestRunningOutputUntypedtartupBehaviorIgnore(t *testing.T) { +func TestRunningOutputUntypedStartupBehaviorIgnore(t *testing.T) { serr := errors.New("untyped err") for _, behavior := range []string{"", "error", "retry", "ignore"} { @@ -692,12 +694,181 @@ func TestRunningOutputPartiallyStarted(t *testing.T) { require.Equal(t, 3, mo.writes) } +func TestRunningOutputWritePartialSuccess(t *testing.T) { + plugin := &mockOutput{ + batchAcceptSize: 4, + } + model := NewRunningOutput(plugin, &OutputConfig{}, 5, 10) + require.NoError(t, model.Init()) + require.NoError(t, model.Connect()) + defer model.Close() + + // Fill buffer completely + for _, metric := range first5 { + model.AddMetric(metric) + } + for _, metric := range next5 { + model.AddMetric(metric) + } + + // We no not expect any successful flush yet + require.Empty(t, plugin.Metrics()) + require.Equal(t, 10, model.buffer.Len()) + + // Write to the output. 
This should only partially succeed with the first + // few metrics removed from buffer + require.ErrorIs(t, model.Write(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 4) + require.Equal(t, 6, model.buffer.Len()) + + // The next write should remove the next metrics from the buffer + require.ErrorIs(t, model.Write(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 8) + require.Equal(t, 2, model.buffer.Len()) + + // The last write should succeed straight away and all metrics should have + // been received by the output + require.NoError(t, model.Write()) + testutil.RequireMetricsEqual(t, append(first5, next5...), plugin.metrics) + require.Zero(t, model.buffer.Len()) +} + +func TestRunningOutputWritePartialSuccessAndLoss(t *testing.T) { + lost := 0 + plugin := &mockOutput{ + batchAcceptSize: 4, + metricFatalIndex: &lost, + } + model := NewRunningOutput(plugin, &OutputConfig{}, 5, 10) + require.NoError(t, model.Init()) + require.NoError(t, model.Connect()) + defer model.Close() + + // Fill buffer completely + for _, metric := range first5 { + model.AddMetric(metric) + } + for _, metric := range next5 { + model.AddMetric(metric) + } + expected := []telegraf.Metric{ + /* fatal, */ first5[1], first5[2], first5[3], + /* fatal, */ next5[0], next5[1], next5[2], + next5[3], next5[4], + } + + // We no not expect any successful flush yet + require.Empty(t, plugin.Metrics()) + require.Equal(t, 10, model.buffer.Len()) + + // Write to the output. 
This should only partially succeed with the first + // few metrics removed from buffer + require.ErrorIs(t, model.Write(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 3) + require.Equal(t, 6, model.buffer.Len()) + + // The next write should remove the next metrics from the buffer + require.ErrorIs(t, model.Write(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 6) + require.Equal(t, 2, model.buffer.Len()) + + // The last write should succeed straight away and all metrics should have + // been received by the output + require.NoError(t, model.Write()) + testutil.RequireMetricsEqual(t, expected, plugin.metrics) + require.Zero(t, model.buffer.Len()) +} + +func TestRunningOutputWriteBatchPartialSuccess(t *testing.T) { + plugin := &mockOutput{ + batchAcceptSize: 4, + } + model := NewRunningOutput(plugin, &OutputConfig{}, 5, 10) + require.NoError(t, model.Init()) + require.NoError(t, model.Connect()) + defer model.Close() + + // Fill buffer completely + for _, metric := range first5 { + model.AddMetric(metric) + } + for _, metric := range next5 { + model.AddMetric(metric) + } + + // We no not expect any successful flush yet + require.Empty(t, plugin.Metrics()) + require.Equal(t, 10, model.buffer.Len()) + + // Write to the output. 
This should only partially succeed with the first + // few metrics removed from buffer + require.ErrorIs(t, model.WriteBatch(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 4) + require.Equal(t, 6, model.buffer.Len()) + + // The next write should remove the next metrics from the buffer + require.ErrorIs(t, model.WriteBatch(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 8) + require.Equal(t, 2, model.buffer.Len()) + + // The last write should succeed straight away and all metrics should have + // been received by the output + require.NoError(t, model.WriteBatch()) + testutil.RequireMetricsEqual(t, append(first5, next5...), plugin.metrics) + require.Zero(t, model.buffer.Len()) +} + +func TestRunningOutputWriteBatchPartialSuccessAndLoss(t *testing.T) { + lost := 0 + plugin := &mockOutput{ + batchAcceptSize: 4, + metricFatalIndex: &lost, + } + model := NewRunningOutput(plugin, &OutputConfig{}, 5, 10) + require.NoError(t, model.Init()) + require.NoError(t, model.Connect()) + defer model.Close() + + // Fill buffer completely + for _, metric := range first5 { + model.AddMetric(metric) + } + for _, metric := range next5 { + model.AddMetric(metric) + } + expected := []telegraf.Metric{ + /* fatal, */ first5[1], first5[2], first5[3], + /* fatal, */ next5[0], next5[1], next5[2], + next5[3], next5[4], + } + + // We no not expect any successful flush yet + require.Empty(t, plugin.Metrics()) + require.Equal(t, 10, model.buffer.Len()) + + // Write to the output. 
This should only partially succeed with the first + // few metrics removed from buffer + require.ErrorIs(t, model.WriteBatch(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 3) + require.Equal(t, 6, model.buffer.Len()) + + // The next write should remove the next metrics from the buffer + require.ErrorIs(t, model.WriteBatch(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 6) + require.Equal(t, 2, model.buffer.Len()) + + // The last write should succeed straight away and all metrics should have + // been received by the output + require.NoError(t, model.WriteBatch()) + testutil.RequireMetricsEqual(t, expected, plugin.metrics) + require.Zero(t, model.buffer.Len()) +} + // Benchmark adding metrics. func BenchmarkRunningOutputAddWrite(b *testing.B) { conf := &OutputConfig{ Filter: Filter{}, } - m := &perfOutput{} ro := NewRunningOutput(m, conf, 1000, 10000) @@ -712,7 +883,6 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) { conf := &OutputConfig{ Filter: Filter{}, } - m := &perfOutput{} ro := NewRunningOutput(m, conf, 1000, 10000) @@ -729,10 +899,8 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) { conf := &OutputConfig{ Filter: Filter{}, } - m := &perfOutput{failWrite: true} ro := NewRunningOutput(m, conf, 1000, 10000) - for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) } @@ -743,9 +911,11 @@ type mockOutput struct { metrics []telegraf.Metric - // if true, mock write failure - failWrite bool + // Failing output simulation + batchAcceptSize int + metricFatalIndex *int + // Startup error simulation startupError error startupErrorCount int writes int @@ -761,11 +931,11 @@ func (m *mockOutput) Connect() error { return m.startupError } -func (m *mockOutput) Close() error { +func (*mockOutput) Close() error { return nil } -func (m *mockOutput) SampleConfig() string { +func (*mockOutput) SampleConfig() string { return "" } @@ -774,12 +944,29 @@ func (m *mockOutput) Write(metrics 
[]telegraf.Metric) error { m.Lock() defer m.Unlock() - if m.failWrite { + + // Simulate a failed write + if m.batchAcceptSize < 0 { return errors.New("failed write") } - m.metrics = append(m.metrics, metrics...) - return nil + // Simulate a successful write + if m.batchAcceptSize == 0 || len(metrics) <= m.batchAcceptSize { + m.metrics = append(m.metrics, metrics...) + return nil + } + + // Simulate a partially successful write + werr := &internal.PartialWriteError{Err: internal.ErrSizeLimitReached} + for i, x := range metrics { + if m.metricFatalIndex != nil && i == *m.metricFatalIndex { + werr.MetricsReject = append(werr.MetricsReject, i) + } else if i < m.batchAcceptSize { + m.metrics = append(m.metrics, x) + werr.MetricsAccept = append(werr.MetricsAccept, i) + } + } + return werr } func (m *mockOutput) Metrics() []telegraf.Metric { diff --git a/plugins/common/ratelimiter/config.go b/plugins/common/ratelimiter/config.go new file mode 100644 index 0000000000000..a2ca077c05f59 --- /dev/null +++ b/plugins/common/ratelimiter/config.go @@ -0,0 +1,19 @@ +package ratelimiter + +import ( + "time" + + "github.com/influxdata/telegraf/config" +) + +type RateLimitConfig struct { + Limit config.Size `toml:"rate_limit"` + Period config.Duration `toml:"rate_limit_period"` +} + +func (cfg *RateLimitConfig) CreateRateLimiter() *RateLimiter { + return &RateLimiter{ + limit: int64(cfg.Limit), + period: time.Duration(cfg.Period), + } +} diff --git a/plugins/common/ratelimiter/limiters.go b/plugins/common/ratelimiter/limiters.go new file mode 100644 index 0000000000000..f24d08b6239f1 --- /dev/null +++ b/plugins/common/ratelimiter/limiters.go @@ -0,0 +1,66 @@ +package ratelimiter + +import ( + "errors" + "math" + "time" +) + +var ( + ErrLimitExceeded = errors.New("not enough tokens") +) + +type RateLimiter struct { + limit int64 + period time.Duration + periodStart time.Time + remaining int64 +} + +func (r *RateLimiter) Remaining(t time.Time) int64 { + if r.limit == 0 { + return 
math.MaxInt64 + } + + // Check for corner case + if !r.periodStart.Before(t) { + return 0 + } + + // We are in a new period, so the complete size is available + deltat := t.Sub(r.periodStart) + if deltat >= r.period { + return r.limit + } + + return r.remaining +} + +func (r *RateLimiter) Accept(t time.Time, used int64) { + if r.limit == 0 || r.periodStart.After(t) { + return + } + + // Remember the first query and reset if we are in a new period + if r.periodStart.IsZero() { + r.periodStart = t + r.remaining = r.limit + } else if deltat := t.Sub(r.periodStart); deltat >= r.period { + r.periodStart = r.periodStart.Add(deltat.Truncate(r.period)) + r.remaining = r.limit + } + + // Update the state + r.remaining = max(r.remaining-used, 0) +} + +func (r *RateLimiter) Undo(t time.Time, used int64) { + // Do nothing if we are not in the current period or unlimited because we + // already reset the limit on a new window. + if r.limit == 0 || r.periodStart.IsZero() || r.periodStart.After(t) || t.Sub(r.periodStart) >= r.period { + return + } + + // Undo the state update + r.remaining = min(r.remaining+used, r.limit) +} diff --git a/plugins/common/ratelimiter/limiters_test.go b/plugins/common/ratelimiter/limiters_test.go new file mode 100644 index 0000000000000..e886b1cc80221 --- /dev/null +++ b/plugins/common/ratelimiter/limiters_test.go @@ -0,0 +1,176 @@ +package ratelimiter + +import ( + "math" + "testing" + "time" + + "github.com/influxdata/telegraf/config" + "github.com/stretchr/testify/require" +) + +func TestUnlimited(t *testing.T) { + cfg := &RateLimitConfig{} + limiter := cfg.CreateRateLimiter() + + start := time.Now() + end := start.Add(30 * time.Minute) + for ts := start; ts.Before(end); ts = ts.Add(1 * time.Minute) { + require.EqualValues(t, int64(math.MaxInt64), limiter.Remaining(ts)) + } +} + +func TestUnlimitedWithPeriod(t *testing.T) { + cfg := &RateLimitConfig{ + Period: config.Duration(5 * time.Minute), + } + limiter := cfg.CreateRateLimiter() + + start := 
time.Now() + end := start.Add(30 * time.Minute) + for ts := start; ts.Before(end); ts = ts.Add(1 * time.Minute) { + require.EqualValues(t, int64(math.MaxInt64), limiter.Remaining(ts)) + } +} + +func TestLimited(t *testing.T) { + tests := []struct { + name string + cfg *RateLimitConfig + step time.Duration + request []int64 + expected []int64 + }{ + { + name: "constant usage", + cfg: &RateLimitConfig{ + Limit: config.Size(1024), + Period: config.Duration(5 * time.Minute), + }, + step: time.Minute, + request: []int64{300}, + expected: []int64{1024, 724, 424, 124, 0, 1024, 724, 424, 124, 0}, + }, + { + name: "variable usage", + cfg: &RateLimitConfig{ + Limit: config.Size(1024), + Period: config.Duration(5 * time.Minute), + }, + step: time.Minute, + request: []int64{256, 128, 512, 64, 64, 1024, 0, 0, 0, 0, 128, 4096, 4096, 4096, 4096, 4096}, + expected: []int64{1024, 768, 640, 128, 64, 1024, 0, 0, 0, 0, 1024, 896, 0, 0, 0, 1024}, + }, + } + + // Run the test with an offset of period multiples + for _, tt := range tests { + t.Run(tt.name+" at period", func(t *testing.T) { + // Setup the limiter + limiter := tt.cfg.CreateRateLimiter() + + // Compute the actual values + start := time.Now().Truncate(tt.step) + for i, expected := range tt.expected { + ts := start.Add(time.Duration(i) * tt.step) + remaining := limiter.Remaining(ts) + use := min(remaining, tt.request[i%len(tt.request)]) + require.Equalf(t, expected, remaining, "mismatch at index %d", i) + limiter.Accept(ts, use) + } + }) + } + + // Run the test at a time of period multiples + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup the limiter + limiter := tt.cfg.CreateRateLimiter() + + // Compute the actual values + start := time.Now().Truncate(tt.step).Add(1 * time.Second) + for i, expected := range tt.expected { + ts := start.Add(time.Duration(i) * tt.step) + remaining := limiter.Remaining(ts) + use := min(remaining, tt.request[i%len(tt.request)]) + require.Equalf(t, expected, remaining, 
"mismatch at index %d", i) + limiter.Accept(ts, use) + } + }) + } +} + +func TestUndo(t *testing.T) { + tests := []struct { + name string + cfg *RateLimitConfig + step time.Duration + request []int64 + expected []int64 + }{ + { + name: "constant usage", + cfg: &RateLimitConfig{ + Limit: config.Size(1024), + Period: config.Duration(5 * time.Minute), + }, + step: time.Minute, + request: []int64{300}, + expected: []int64{1024, 724, 424, 124, 124, 1024, 724, 424, 124, 124}, + }, + { + name: "variable usage", + cfg: &RateLimitConfig{ + Limit: config.Size(1024), + Period: config.Duration(5 * time.Minute), + }, + step: time.Minute, + request: []int64{256, 128, 512, 64, 64, 1024, 0, 0, 0, 0, 128, 4096, 4096, 4096, 4096, 4096}, + expected: []int64{1024, 768, 640, 128, 64, 1024, 0, 0, 0, 0, 1024, 896, 896, 896, 896, 1024}, + }, + } + + // Run the test with an offset of period multiples + for _, tt := range tests { + t.Run(tt.name+" at period", func(t *testing.T) { + // Setup the limiter + limiter := tt.cfg.CreateRateLimiter() + + // Compute the actual values + start := time.Now().Truncate(tt.step) + for i, expected := range tt.expected { + ts := start.Add(time.Duration(i) * tt.step) + remaining := limiter.Remaining(ts) + use := min(remaining, tt.request[i%len(tt.request)]) + require.Equalf(t, expected, remaining, "mismatch at index %d", i) + limiter.Accept(ts, use) + // Undo too large operations + if tt.request[i%len(tt.request)] > remaining { + limiter.Undo(ts, use) + } + } + }) + } + + // Run the test at a time of period multiples + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup the limiter + limiter := tt.cfg.CreateRateLimiter() + + // Compute the actual values + start := time.Now().Truncate(tt.step).Add(1 * time.Second) + for i, expected := range tt.expected { + ts := start.Add(time.Duration(i) * tt.step) + remaining := limiter.Remaining(ts) + use := min(remaining, tt.request[i%len(tt.request)]) + require.Equalf(t, expected, remaining, 
"mismatch at index %d", i) + limiter.Accept(ts, use) + // Undo too large operations + if tt.request[i%len(tt.request)] > remaining { + limiter.Undo(ts, use) + } + } + }) + } +} diff --git a/plugins/common/ratelimiter/serializers.go b/plugins/common/ratelimiter/serializers.go new file mode 100644 index 0000000000000..6bd6ce78e0ff9 --- /dev/null +++ b/plugins/common/ratelimiter/serializers.go @@ -0,0 +1,100 @@ +package ratelimiter + +import ( + "bytes" + "math" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" +) + +// Serializer interface abstracting the different implementations of a +// limited-size serializer +type Serializer interface { + Serialize(metric telegraf.Metric, limit int64) ([]byte, error) + SerializeBatch(metrics []telegraf.Metric, limit int64) ([]byte, error) +} + +// Individual serializers do serialize each metric individually using the +// serializer's Serialize() function and add the resulting output to the buffer +// until the limit is reached. This only works for serializers NOT requiring +// the serialization of a batch as-a-whole. +type IndividualSerializer struct { + serializer telegraf.Serializer + buffer *bytes.Buffer +} + +func NewIndividualSerializer(s telegraf.Serializer) *IndividualSerializer { + return &IndividualSerializer{ + serializer: s, + buffer: &bytes.Buffer{}, + } +} + +func (s *IndividualSerializer) Serialize(metric telegraf.Metric, limit int64) ([]byte, error) { + // Do the serialization + buf, err := s.serializer.Serialize(metric) + if err != nil { + return nil, err + } + + // The serialized metric fits into the limit, so output it + if buflen := int64(len(buf)); buflen <= limit { + return buf, nil + } + + // The serialized metric exceeds the limit + return nil, internal.ErrSizeLimitReached +} + +func (s *IndividualSerializer) SerializeBatch(metrics []telegraf.Metric, limit int64) ([]byte, error) { + // Grow the buffer so it can hold at least the required size. 
This will + // save us from reallocate often + s.buffer.Reset() + if limit > 0 && limit < int64(math.MaxInt) { + s.buffer.Grow(int(limit)) + } + + // Prepare a potential write error and be optimistic + werr := &internal.PartialWriteError{ + MetricsAccept: make([]int, 0, len(metrics)), + } + + // Iterate through the metrics, serialize them and add them to the output + // buffer if they are within the size limit. + var used int64 + for i, m := range metrics { + buf, err := s.serializer.Serialize(m) + if err != nil { + // Failing serialization is a fatal error so mark the metric as such + werr.Err = internal.ErrSerialization + werr.MetricsReject = append(werr.MetricsReject, i) + werr.MetricsRejectErrors = append(werr.MetricsRejectErrors, err) + continue + } + + // The serialized metric fits into the limit, so add it to the output + if usedAdded := used + int64(len(buf)); usedAdded <= limit { + if _, err := s.buffer.Write(buf); err != nil { + return nil, err + } + werr.MetricsAccept = append(werr.MetricsAccept, i) + used = usedAdded + continue + } + + // Return only the size-limit-reached error if all metrics failed. 
+ if used == 0 { + return nil, internal.ErrSizeLimitReached + } + + // Adding the serialized metric would exceed the limit so exit with an + // WriteError and fill in the required information + werr.Err = internal.ErrSizeLimitReached + break + } + if werr.Err != nil { + return s.buffer.Bytes(), werr + } + return s.buffer.Bytes(), nil +} diff --git a/plugins/common/ratelimiter/serializers_test.go b/plugins/common/ratelimiter/serializers_test.go new file mode 100644 index 0000000000000..06cc88a395674 --- /dev/null +++ b/plugins/common/ratelimiter/serializers_test.go @@ -0,0 +1,351 @@ +package ratelimiter + +import ( + "math" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers/influx" + "github.com/stretchr/testify/require" +) + +func TestIndividualSerializer(t *testing.T) { + input := []telegraf.Metric{ + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 123, + "temperature": 25.0, + "pressure": 1023.4, + }, + time.Unix(1722443551, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "B", + "status": "failed", + }, + map[string]interface{}{ + "operating_hours": 8430, + "temperature": 65.2, + "pressure": 985.9, + }, + time.Unix(1722443554, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "C", + "status": "warning", + }, + map[string]interface{}{ + "operating_hours": 6765, + "temperature": 42.5, + "pressure": 986.1, + }, + time.Unix(1722443555, 0), + ), + metric.New( + "device", + map[string]string{ + "source": "localhost", + "location": "factory_north", + }, + map[string]interface{}{ + "status": "ok", + }, + 
time.Unix(1722443556, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 5544, + "temperature": 18.6, + "pressure": 1069.4, + }, + time.Unix(1722443552, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "B", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 65, + "temperature": 29.7, + "pressure": 1101.2, + }, + time.Unix(1722443553, 0), + ), + metric.New( + "device", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + }, + map[string]interface{}{ + "status": "ok", + }, + time.Unix(1722443559, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "C", + "status": "off", + }, + map[string]interface{}{ + "operating_hours": 0, + "temperature": 0.0, + "pressure": 0.0, + }, + time.Unix(1722443562, 0), + ), + } + //nolint:lll // Resulting metrics should not be wrapped for readability + expected := []string{ + "serializer_test,location=factory_north,machine=A,source=localhost,status=ok operating_hours=123i,pressure=1023.4,temperature=25 1722443551000000000\n" + + "serializer_test,location=factory_north,machine=B,source=localhost,status=failed operating_hours=8430i,pressure=985.9,temperature=65.2 1722443554000000000\n", + "serializer_test,location=factory_north,machine=C,source=localhost,status=warning operating_hours=6765i,pressure=986.1,temperature=42.5 1722443555000000000\n" + + "device,location=factory_north,source=localhost status=\"ok\" 1722443556000000000\n" + + "serializer_test,location=factory_south,machine=A,source=gateway_af43e,status=ok operating_hours=5544i,pressure=1069.4,temperature=18.6 1722443552000000000\n", + 
"serializer_test,location=factory_south,machine=B,source=gateway_af43e,status=ok operating_hours=65i,pressure=1101.2,temperature=29.7 1722443553000000000\n" + + "device,location=factory_south,source=gateway_af43e status=\"ok\" 1722443559000000000\n" + + "serializer_test,location=factory_south,machine=C,source=gateway_af43e,status=off operating_hours=0i,pressure=0,temperature=0 1722443562000000000\n", + } + + // Setup the limited serializer + s := &influx.Serializer{SortFields: true} + require.NoError(t, s.Init()) + serializer := NewIndividualSerializer(s) + + var werr *internal.PartialWriteError + + // Do the first serialization runs with all metrics + buf, err := serializer.SerializeBatch(input, 400) + require.ErrorAs(t, err, &werr) + require.ErrorIs(t, werr.Err, internal.ErrSizeLimitReached) + require.EqualValues(t, []int{0, 1}, werr.MetricsAccept) + require.Empty(t, werr.MetricsReject) + require.Equal(t, expected[0], string(buf)) + + // Run again with the successful metrics removed + buf, err = serializer.SerializeBatch(input[2:], 400) + require.ErrorAs(t, err, &werr) + require.ErrorIs(t, werr.Err, internal.ErrSizeLimitReached) + require.EqualValues(t, []int{0, 1, 2}, werr.MetricsAccept) + require.Empty(t, werr.MetricsReject) + require.Equal(t, expected[1], string(buf)) + + // Final run with the successful metrics removed + buf, err = serializer.SerializeBatch(input[5:], 400) + require.NoError(t, err) + require.Equal(t, expected[2], string(buf)) +} + +func TestIndividualSerializerFirstTooBig(t *testing.T) { + input := []telegraf.Metric{ + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 123, + "temperature": 25.0, + "pressure": 1023.4, + }, + time.Unix(1722443551, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "B", + "status": 
"failed", + }, + map[string]interface{}{ + "operating_hours": 8430, + "temperature": 65.2, + "pressure": 985.9, + }, + time.Unix(1722443554, 0), + ), + } + + // Setup the limited serializer + s := &influx.Serializer{SortFields: true} + require.NoError(t, s.Init()) + serializer := NewIndividualSerializer(s) + + // The first metric will already exceed the size so all metrics fail and + // we expect a shortcut error. + buf, err := serializer.SerializeBatch(input, 100) + require.ErrorIs(t, err, internal.ErrSizeLimitReached) + require.Empty(t, buf) +} + +func TestIndividualSerializerUnlimited(t *testing.T) { + input := []telegraf.Metric{ + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 123, + "temperature": 25.0, + "pressure": 1023.4, + }, + time.Unix(1722443551, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "B", + "status": "failed", + }, + map[string]interface{}{ + "operating_hours": 8430, + "temperature": 65.2, + "pressure": 985.9, + }, + time.Unix(1722443554, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "C", + "status": "warning", + }, + map[string]interface{}{ + "operating_hours": 6765, + "temperature": 42.5, + "pressure": 986.1, + }, + time.Unix(1722443555, 0), + ), + metric.New( + "device", + map[string]string{ + "source": "localhost", + "location": "factory_north", + }, + map[string]interface{}{ + "status": "ok", + }, + time.Unix(1722443556, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 5544, + "temperature": 18.6, + "pressure": 1069.4, + }, + time.Unix(1722443552, 0), + ), + 
metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "B", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 65, + "temperature": 29.7, + "pressure": 1101.2, + }, + time.Unix(1722443553, 0), + ), + metric.New( + "device", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + }, + map[string]interface{}{ + "status": "ok", + }, + time.Unix(1722443559, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "C", + "status": "off", + }, + map[string]interface{}{ + "operating_hours": 0, + "temperature": 0.0, + "pressure": 0.0, + }, + time.Unix(1722443562, 0), + ), + } + //nolint:lll // Resulting metrics should not be wrapped for readability + expected := "serializer_test,location=factory_north,machine=A,source=localhost,status=ok operating_hours=123i,pressure=1023.4,temperature=25 1722443551000000000\n" + + "serializer_test,location=factory_north,machine=B,source=localhost,status=failed operating_hours=8430i,pressure=985.9,temperature=65.2 1722443554000000000\n" + + "serializer_test,location=factory_north,machine=C,source=localhost,status=warning operating_hours=6765i,pressure=986.1,temperature=42.5 1722443555000000000\n" + + "device,location=factory_north,source=localhost status=\"ok\" 1722443556000000000\n" + + "serializer_test,location=factory_south,machine=A,source=gateway_af43e,status=ok operating_hours=5544i,pressure=1069.4,temperature=18.6 1722443552000000000\n" + + "serializer_test,location=factory_south,machine=B,source=gateway_af43e,status=ok operating_hours=65i,pressure=1101.2,temperature=29.7 1722443553000000000\n" + + "device,location=factory_south,source=gateway_af43e status=\"ok\" 1722443559000000000\n" + + "serializer_test,location=factory_south,machine=C,source=gateway_af43e,status=off operating_hours=0i,pressure=0,temperature=0 1722443562000000000\n" + 
+ // Setup the limited serializer + s := &influx.Serializer{SortFields: true} + require.NoError(t, s.Init()) + serializer := NewIndividualSerializer(s) + + // Do the first serialization runs with all metrics + buf, err := serializer.SerializeBatch(input, math.MaxInt64) + require.NoError(t, err) + require.Equal(t, expected, string(buf)) +} From 11709858e33603c6504572ca8a032cf1665ae372 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:50:21 +0100 Subject: [PATCH 055/170] feat(outputs.influxdb_v2): Add rate limit implementation (#15742) --- plugins/common/ratelimiter/config.go | 8 +- plugins/common/ratelimiter/limiters_test.go | 24 +++- plugins/outputs/influxdb_v2/README.md | 6 + plugins/outputs/influxdb_v2/http.go | 121 ++++++++++++------ plugins/outputs/influxdb_v2/influxdb_v2.go | 22 +++- .../outputs/influxdb_v2/influxdb_v2_test.go | 113 ++++++++++++++++ plugins/outputs/influxdb_v2/sample.conf | 6 + 7 files changed, 245 insertions(+), 55 deletions(-) diff --git a/plugins/common/ratelimiter/config.go b/plugins/common/ratelimiter/config.go index a2ca077c05f59..9ebbeb2704c16 100644 --- a/plugins/common/ratelimiter/config.go +++ b/plugins/common/ratelimiter/config.go @@ -1,6 +1,7 @@ package ratelimiter import ( + "errors" "time" "github.com/influxdata/telegraf/config" @@ -11,9 +12,12 @@ type RateLimitConfig struct { Period config.Duration `toml:"rate_limit_period"` } -func (cfg *RateLimitConfig) CreateRateLimiter() *RateLimiter { +func (cfg *RateLimitConfig) CreateRateLimiter() (*RateLimiter, error) { + if cfg.Limit > 0 && cfg.Period <= 0 { + return nil, errors.New("invalid period for rate-limit") + } return &RateLimiter{ limit: int64(cfg.Limit), period: time.Duration(cfg.Period), - } + }, nil } diff --git a/plugins/common/ratelimiter/limiters_test.go b/plugins/common/ratelimiter/limiters_test.go index e886b1cc80221..28b53159ce448 100644 --- a/plugins/common/ratelimiter/limiters_test.go +++ 
b/plugins/common/ratelimiter/limiters_test.go @@ -9,9 +9,16 @@ import ( "github.com/stretchr/testify/require" ) +func TestInvalidPeriod(t *testing.T) { + cfg := &RateLimitConfig{Limit: config.Size(1024)} + _, err := cfg.CreateRateLimiter() + require.ErrorContains(t, err, "invalid period for rate-limit") +} + func TestUnlimited(t *testing.T) { cfg := &RateLimitConfig{} - limiter := cfg.CreateRateLimiter() + limiter, err := cfg.CreateRateLimiter() + require.NoError(t, err) start := time.Now() end := start.Add(30 * time.Minute) @@ -24,7 +31,8 @@ func TestUnlimitedWithPeriod(t *testing.T) { cfg := &RateLimitConfig{ Period: config.Duration(5 * time.Minute), } - limiter := cfg.CreateRateLimiter() + limiter, err := cfg.CreateRateLimiter() + require.NoError(t, err) start := time.Now() end := start.Add(30 * time.Minute) @@ -67,7 +75,8 @@ func TestLimited(t *testing.T) { for _, tt := range tests { t.Run(tt.name+" at period", func(t *testing.T) { // Setup the limiter - limiter := tt.cfg.CreateRateLimiter() + limiter, err := tt.cfg.CreateRateLimiter() + require.NoError(t, err) // Compute the actual values start := time.Now().Truncate(tt.step) @@ -85,7 +94,8 @@ func TestLimited(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Setup the limiter - limiter := tt.cfg.CreateRateLimiter() + limiter, err := tt.cfg.CreateRateLimiter() + require.NoError(t, err) // Compute the actual values start := time.Now().Truncate(tt.step).Add(1 * time.Second) @@ -134,7 +144,8 @@ func TestUndo(t *testing.T) { for _, tt := range tests { t.Run(tt.name+" at period", func(t *testing.T) { // Setup the limiter - limiter := tt.cfg.CreateRateLimiter() + limiter, err := tt.cfg.CreateRateLimiter() + require.NoError(t, err) // Compute the actual values start := time.Now().Truncate(tt.step) @@ -156,7 +167,8 @@ func TestUndo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Setup the limiter - limiter := tt.cfg.CreateRateLimiter() + limiter, err := 
tt.cfg.CreateRateLimiter() + require.NoError(t, err) // Compute the actual values start := time.Now().Truncate(tt.step).Add(1 * time.Second) diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index 239e953e6fcc9..b9a78f0b00a04 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -101,6 +101,12 @@ to use them. # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## Rate limits for sending data (disabled by default) + ## Available, uncompressed payload size e.g. "5Mb" + # rate_limit = "unlimited" + ## Fixed time-window for the available payload size e.g. "5m" + # rate_limit_period = "0s" ``` ## Metrics diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 34e698dd75c94..8a622a5f4b522 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -22,7 +22,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/serializers/influx" + "github.com/influxdata/telegraf/plugins/common/ratelimiter" ) type APIError struct { @@ -59,8 +59,9 @@ type httpClient struct { pingTimeout config.Duration readIdleTimeout config.Duration tlsConfig *tls.Config - serializer *influx.Serializer encoder internal.ContentEncoder + serializer ratelimiter.Serializer + rateLimiter *ratelimiter.RateLimiter client *http.Client params url.Values retryTime time.Time @@ -160,52 +161,69 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } batches := make(map[string][]telegraf.Metric) + batchIndices := make(map[string][]int) if c.bucketTag == "" { - err := c.writeBatch(ctx, c.bucket, metrics) - if err != nil { - var apiErr *APIError - if errors.As(err, &apiErr) { - if apiErr.StatusCode == http.StatusRequestEntityTooLarge { - return 
c.splitAndWriteBatch(ctx, c.bucket, metrics) - } - } - - return err + batches[c.bucket] = metrics + batchIndices[c.bucket] = make([]int, len(metrics)) + for i := range metrics { + batchIndices[c.bucket][i] = i } } else { - for _, metric := range metrics { + for i, metric := range metrics { bucket, ok := metric.GetTag(c.bucketTag) if !ok { bucket = c.bucket - } - - if _, ok := batches[bucket]; !ok { - batches[bucket] = make([]telegraf.Metric, 0) - } - - if c.excludeBucketTag { - // Avoid modifying the metric in case we need to retry the request. + } else if c.excludeBucketTag { + // Avoid modifying the metric if we do remove the tag metric = metric.Copy() metric.Accept() metric.RemoveTag(c.bucketTag) } batches[bucket] = append(batches[bucket], metric) + batchIndices[c.bucket] = append(batchIndices[c.bucket], i) + } + } + + var wErr internal.PartialWriteError + for bucket, batch := range batches { + err := c.writeBatch(ctx, bucket, batch) + if err == nil { + wErr.MetricsAccept = append(wErr.MetricsAccept, batchIndices[bucket]...) + continue } - for bucket, batch := range batches { - err := c.writeBatch(ctx, bucket, batch) - if err != nil { - var apiErr *APIError - if errors.As(err, &apiErr) { - if apiErr.StatusCode == http.StatusRequestEntityTooLarge { - return c.splitAndWriteBatch(ctx, c.bucket, metrics) - } - } - - return err + // Check if the request was too large and split it + var apiErr *APIError + if errors.As(err, &apiErr) { + if apiErr.StatusCode == http.StatusRequestEntityTooLarge { + return c.splitAndWriteBatch(ctx, c.bucket, metrics) } + wErr.Err = err + wErr.MetricsReject = append(wErr.MetricsReject, batchIndices[bucket]...) 
+ return &wErr } + + // Check if we got a write error and if so, translate the returned + // metric indices to return the original indices in case of bucketing + var writeErr *internal.PartialWriteError + if errors.As(err, &writeErr) { + wErr.Err = writeErr.Err + for _, idx := range writeErr.MetricsAccept { + wErr.MetricsAccept = append(wErr.MetricsAccept, batchIndices[bucket][idx]) + } + for _, idx := range writeErr.MetricsReject { + wErr.MetricsReject = append(wErr.MetricsReject, batchIndices[bucket][idx]) + } + if !errors.Is(writeErr.Err, internal.ErrSizeLimitReached) { + continue + } + return &wErr + } + + // Return the error without special treatment + wErr.Err = err + return &wErr } return nil } @@ -222,11 +240,16 @@ func (c *httpClient) splitAndWriteBatch(ctx context.Context, bucket string, metr } func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error { - // Serialize the metrics - body, err := c.serializer.SerializeBatch(metrics) - if err != nil { - return err + // Get the current limit for the outbound data + ratets := time.Now() + limit := c.rateLimiter.Remaining(ratets) + + // Serialize the metrics with the remaining limit, exit early if nothing was serialized + body, werr := c.serializer.SerializeBatch(metrics, limit) + if werr != nil && !errors.Is(werr, internal.ErrSizeLimitReached) || len(body) == 0 { + return werr } + used := int64(len(body)) // Encode the content if requested if c.encoder != nil { @@ -249,6 +272,7 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te c.addHeaders(req) // Execute the request + c.rateLimiter.Accept(ratets, used) resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { internal.OnClientError(c.client, err) @@ -269,7 +293,7 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te http.StatusMultiStatus, http.StatusAlreadyReported: c.retryCount = 0 - return nil + return werr } // We got an error and now try to 
decode further @@ -294,11 +318,18 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te http.StatusBadRequest, // request was received but server refused to process it due to a semantic problem with the request. // for example, submitting metrics outside the retention period. - // Clients should *not* repeat the request and the metrics should be dropped. http.StatusUnprocessableEntity, http.StatusNotAcceptable: - c.log.Errorf("Failed to write metric to %s (will be dropped: %s): %s\n", bucket, resp.Status, desc) - return nil + + // Clients should *not* repeat the request and the metrics should be dropped. + rejected := make([]int, 0, len(metrics)) + for i := range len(metrics) { + rejected = append(rejected, i) + } + return &internal.PartialWriteError{ + Err: fmt.Errorf("failed to write metric to %s (will be dropped: %s): %s", bucket, resp.Status, desc), + MetricsReject: rejected, + } case http.StatusUnauthorized, http.StatusForbidden: return fmt.Errorf("failed to write metric to %s (%s): %s", bucket, resp.Status, desc) case http.StatusTooManyRequests, @@ -316,8 +347,14 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te // if it's any other 4xx code, the client should not retry as it's the client's mistake. // retrying will not make the request magically work. if len(resp.Status) > 0 && resp.Status[0] == '4' { - c.log.Errorf("Failed to write metric to %s (will be dropped: %s): %s\n", bucket, resp.Status, desc) - return nil + rejected := make([]int, 0, len(metrics)) + for i := range len(metrics) { + rejected = append(rejected, i) + } + return &internal.PartialWriteError{ + Err: fmt.Errorf("failed to write metric to %s (will be dropped: %s): %s", bucket, resp.Status, desc), + MetricsReject: rejected, + } } // This is only until platform spec is fully implemented. 
As of the diff --git a/plugins/outputs/influxdb_v2/influxdb_v2.go b/plugins/outputs/influxdb_v2/influxdb_v2.go index 15a66632788e2..89b0d8d2f875a 100644 --- a/plugins/outputs/influxdb_v2/influxdb_v2.go +++ b/plugins/outputs/influxdb_v2/influxdb_v2.go @@ -17,6 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/ratelimiter" commontls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/influx" @@ -44,10 +45,11 @@ type InfluxDB struct { ReadIdleTimeout config.Duration `toml:"read_idle_timeout"` Log telegraf.Logger `toml:"-"` commontls.ClientConfig + ratelimiter.RateLimitConfig clients []*httpClient encoder internal.ContentEncoder - serializer *influx.Serializer + serializer ratelimiter.Serializer tlsCfg *tls.Config } @@ -65,7 +67,7 @@ func (i *InfluxDB) Init() error { i.URLs = append(i.URLs, "http://localhost:8086") } - // Check options + // Init encoding if configured switch i.ContentEncoding { case "", "gzip": i.ContentEncoding = "gzip" @@ -80,13 +82,14 @@ func (i *InfluxDB) Init() error { } // Setup the limited serializer - i.serializer = &influx.Serializer{ + serializer := &influx.Serializer{ UintSupport: i.UintSupport, OmitTimestamp: i.OmitTimestamp, } - if err := i.serializer.Init(); err != nil { + if err := serializer.Init(); err != nil { return fmt.Errorf("setting up serializer failed: %w", err) } + i.serializer = ratelimiter.NewIndividualSerializer(serializer) // Setup the client config tlsCfg, err := i.ClientConfig.TLSConfig() @@ -142,6 +145,10 @@ func (i *InfluxDB) Connect() error { switch parts.Scheme { case "http", "https", "unix": + limiter, err := i.RateLimitConfig.CreateRateLimiter() + if err != nil { + return err + } c := &httpClient{ url: parts, localAddr: localAddr, @@ -158,8 +165,9 @@ func (i *InfluxDB) Connect() error { 
tlsConfig: i.tlsCfg, pingTimeout: i.PingTimeout, readIdleTimeout: i.ReadIdleTimeout, - serializer: i.serializer, encoder: i.encoder, + rateLimiter: limiter, + serializer: i.serializer, log: i.Log, } @@ -191,6 +199,10 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { for _, n := range rand.Perm(len(i.clients)) { client := i.clients[n] if err := client.Write(ctx, metrics); err != nil { + var werr *internal.PartialWriteError + if errors.As(err, &werr) || errors.Is(err, internal.ErrSizeLimitReached) { + return err + } i.Log.Errorf("When writing to [%s]: %v", client.url, err) continue } diff --git a/plugins/outputs/influxdb_v2/influxdb_v2_test.go b/plugins/outputs/influxdb_v2/influxdb_v2_test.go index 36c3c3b08e0d9..f93617a38744e 100644 --- a/plugins/outputs/influxdb_v2/influxdb_v2_test.go +++ b/plugins/outputs/influxdb_v2/influxdb_v2_test.go @@ -7,6 +7,7 @@ import ( "net/http/httptest" "reflect" "strings" + "sync/atomic" "testing" "time" @@ -14,7 +15,9 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/common/ratelimiter" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" @@ -373,3 +376,113 @@ func TestTooLargeWriteRetry(t *testing.T) { } require.Error(t, plugin.Write(hugeMetrics)) } + +func TestRateLimit(t *testing.T) { + // Setup a test server + var received atomic.Uint64 + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v2/write": + if err := r.ParseForm(); err != nil { + w.WriteHeader(http.StatusUnprocessableEntity) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusUnprocessableEntity) + return + } + received.Add(uint64(len(body))) + + 
w.WriteHeader(http.StatusNoContent) + + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }), + ) + defer ts.Close() + + // Setup plugin and connect + plugin := &influxdb.InfluxDB{ + URLs: []string{"http://" + ts.Listener.Addr().String()}, + Bucket: "telegraf", + ContentEncoding: "identity", + RateLimitConfig: ratelimiter.RateLimitConfig{ + Limit: 50, + Period: config.Duration(time.Second), + }, + Log: &testutil.Logger{}, + } + require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Connect()) + defer plugin.Close() + + // Together the metric batch size is too big, split up, we get success + metrics := []telegraf.Metric{ + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 1), + ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 99.0, + }, + time.Unix(0, 2), + ), + metric.New( + "operating_hours", + map[string]string{ + "machine": "A", + }, + map[string]interface{}{ + "value": 123.456, + }, + time.Unix(0, 3), + ), + metric.New( + "status", + map[string]string{ + "machine": "B", + }, + map[string]interface{}{ + "temp": 48.235, + "remaining": 999.999, + }, + time.Unix(0, 4), + ), + } + + // Write the metrics the first time. Only the first two metrics should be + // received by the server due to the rate limit. 
+ require.ErrorIs(t, plugin.Write(metrics), internal.ErrSizeLimitReached) + require.LessOrEqual(t, received.Load(), uint64(30)) + + // A direct follow-up write attempt with the remaining metrics should fail + // due to the rate limit being reached + require.ErrorIs(t, plugin.Write(metrics[2:]), internal.ErrSizeLimitReached) + require.LessOrEqual(t, received.Load(), uint64(30)) + + // Wait for at least the period (plus some safety margin) to write the third metric + time.Sleep(time.Duration(plugin.RateLimitConfig.Period) + 100*time.Millisecond) + require.ErrorIs(t, plugin.Write(metrics[2:]), internal.ErrSizeLimitReached) + require.Greater(t, received.Load(), uint64(30)) + require.LessOrEqual(t, received.Load(), uint64(72)) + + // Wait again for the period for at least the period (plus some safety margin) + // to write the last metric. This should finally succeed as all metrics + // are written. + time.Sleep(time.Duration(plugin.RateLimitConfig.Period) + 100*time.Millisecond) + require.NoError(t, plugin.Write(metrics[3:])) + require.Equal(t, uint64(121), received.Load()) +} diff --git a/plugins/outputs/influxdb_v2/sample.conf b/plugins/outputs/influxdb_v2/sample.conf index 5fc41a6613686..e5de679fcd7fb 100644 --- a/plugins/outputs/influxdb_v2/sample.conf +++ b/plugins/outputs/influxdb_v2/sample.conf @@ -71,3 +71,9 @@ # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## Rate limits for sending data (disabled by default) + ## Available, uncompressed payload size e.g. "5Mb" + # rate_limit = "unlimited" + ## Fixed time-window for the available payload size e.g. 
"5m" + # rate_limit_period = "0s" From c0db9648eb8b4057ec35ad6abc53a20cd96bd843 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:50:49 +0100 Subject: [PATCH 056/170] chore: Update go to v1.23.4 (#16265) --- .circleci/config.yml | 2 +- .github/workflows/readme-linter.yml | 2 +- scripts/ci.docker | 2 +- scripts/installgo_linux.sh | 4 ++-- scripts/installgo_mac.sh | 6 +++--- scripts/installgo_windows.sh | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 20568a9c4284f..3ca65eca9e3c6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,7 +8,7 @@ executors: working_directory: '/go/src/github.com/influxdata/telegraf' resource_class: large docker: - - image: 'quay.io/influxdb/telegraf-ci:1.23.3' + - image: 'quay.io/influxdb/telegraf-ci:1.23.4' environment: GOFLAGS: -p=4 mac: diff --git a/.github/workflows/readme-linter.yml b/.github/workflows/readme-linter.yml index e8bca6d257e49..a23a1fccaab9b 100644 --- a/.github/workflows/readme-linter.yml +++ b/.github/workflows/readme-linter.yml @@ -11,7 +11,7 @@ jobs: steps: - uses: actions/setup-go@v5 with: - go-version: '1.23.3' + go-version: '1.23.4' - uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/scripts/ci.docker b/scripts/ci.docker index 65eed0d0af29a..b50f161b4ee5b 100644 --- a/scripts/ci.docker +++ b/scripts/ci.docker @@ -1,4 +1,4 @@ -FROM golang:1.23.3 +FROM golang:1.23.4 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/installgo_linux.sh b/scripts/installgo_linux.sh index e792f11cf39c4..a047d679c8bc9 100644 --- a/scripts/installgo_linux.sh +++ b/scripts/installgo_linux.sh @@ -2,10 +2,10 @@ set -eux -GO_VERSION="1.23.3" +GO_VERSION="1.23.4" GO_ARCH="linux-amd64" # from https://go.dev/dl -GO_VERSION_SHA="a0afb9744c00648bafb1b90b4aba5bdb86f424f02f9275399ce0c20b93a2c3a8" +GO_VERSION_SHA="6924efde5de86fe277676e929dc9917d466efa02fb934197bc2eba35d5680971" # Download Go and 
verify Go tarball setup_go () { diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh index 076a53223b7bd..50d6909418ed0 100644 --- a/scripts/installgo_mac.sh +++ b/scripts/installgo_mac.sh @@ -3,9 +3,9 @@ set -eux ARCH=$(uname -m) -GO_VERSION="1.23.3" -GO_VERSION_SHA_arm64="31e119fe9bde6e105407a32558d5b5fa6ca11e2bd17f8b7b2f8a06aba16a0632" # from https://go.dev/dl -GO_VERSION_SHA_amd64="c7e024d5c0bc81845070f23598caf02f05b8ae88fd4ad2cd3e236ddbea833ad2" # from https://go.dev/dl +GO_VERSION="1.23.4" +GO_VERSION_SHA_arm64="87d2bb0ad4fe24d2a0685a55df321e0efe4296419a9b3de03369dbe60b8acd3a" # from https://go.dev/dl +GO_VERSION_SHA_amd64="6700067389a53a1607d30aa8d6e01d198230397029faa0b109e89bc871ab5a0e" # from https://go.dev/dl if [ "$ARCH" = 'arm64' ]; then GO_ARCH="darwin-arm64" diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh index 15d82bc2eefbb..05d2632449d8e 100644 --- a/scripts/installgo_windows.sh +++ b/scripts/installgo_windows.sh @@ -2,7 +2,7 @@ set -eux -GO_VERSION="1.23.3" +GO_VERSION="1.23.4" setup_go () { choco upgrade golang --allow-downgrade --version=${GO_VERSION} From a9c91f162ddbe453364f68a89799535c43328a3c Mon Sep 17 00:00:00 2001 From: tomas-quix <78492422+tomas-quix@users.noreply.github.com> Date: Fri, 6 Dec 2024 22:07:25 +0100 Subject: [PATCH 057/170] feat(outputs.quix): Add plugin (#16144) Co-authored-by: stereosky Co-authored-by: Sven Rebhan --- plugins/outputs/all/quix.go | 5 + plugins/outputs/quix/README.md | 58 ++++++++++ plugins/outputs/quix/config.go | 81 ++++++++++++++ plugins/outputs/quix/quix.go | 169 ++++++++++++++++++++++++++++ plugins/outputs/quix/quix_test.go | 180 ++++++++++++++++++++++++++++++ plugins/outputs/quix/sample.conf | 14 +++ 6 files changed, 507 insertions(+) create mode 100644 plugins/outputs/all/quix.go create mode 100644 plugins/outputs/quix/README.md create mode 100644 plugins/outputs/quix/config.go create mode 100644 plugins/outputs/quix/quix.go create mode 100644 
plugins/outputs/quix/quix_test.go create mode 100644 plugins/outputs/quix/sample.conf diff --git a/plugins/outputs/all/quix.go b/plugins/outputs/all/quix.go new file mode 100644 index 0000000000000..97f559634a497 --- /dev/null +++ b/plugins/outputs/all/quix.go @@ -0,0 +1,5 @@ +//go:build !custom || outputs || outputs.quix + +package all + +import _ "github.com/influxdata/telegraf/plugins/outputs/quix" // register plugin diff --git a/plugins/outputs/quix/README.md b/plugins/outputs/quix/README.md new file mode 100644 index 0000000000000..870f7e36d4f70 --- /dev/null +++ b/plugins/outputs/quix/README.md @@ -0,0 +1,58 @@ +# Quix Output Plugin + +This plugin writes metrics to a [Quix][quix] endpoint. + +Please consult Quix's [official documentation][docs] for more details on the +Quix platform architecture and concepts. + +⭐ Telegraf v1.33.0 +🏷️ cloud, messaging +💻 all + +[quix]: https://quix.io +[docs]: https://quix.io/docs/ + +## Global configuration options + +In addition to the plugin-specific configuration settings, plugins support +additional global and plugin configuration settings. These settings are used to +modify metrics, tags, and field or create aliases and configure ordering, etc. +See the [CONFIGURATION.md][CONFIGURATION.md] for more details. + +[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins + +## Secret-store support + +This plugin supports secrets from secret-stores for the `token` option. +See the [secret-store documentation][SECRETSTORE] for more details on how +to use them. 
+ +[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets + +## Configuration + +```toml @sample.conf +# Send metrics to a Quix data processing pipeline +[[outputs.quix]] + ## Endpoint for providing the configuration + # url = "https://portal-api.platform.quix.io" + + ## Workspace and topics to send the metrics to + workspace = "your_workspace" + topic = "your_topic" + + ## Authentication token created in Quix + token = "your_auth_token" + + ## Amount of time allowed to complete the HTTP request for fetching the config + # timeout = "5s" +``` + +The plugin requires a [SDK token][token] for authentication with Quix. You can +generate the `token` in settings under the `API and tokens` section. + +Furthermore, the `workspace` parameter must be set to the `Workspace ID` or the +`Environment ID` of your Quix project. Those values can be found in settings +under the `General settings` section. + +[token]: https://quix.io/docs/develop/authentication/personal-access-token.html diff --git a/plugins/outputs/quix/config.go b/plugins/outputs/quix/config.go new file mode 100644 index 0000000000000..ed36f04ad1ba3 --- /dev/null +++ b/plugins/outputs/quix/config.go @@ -0,0 +1,81 @@ +package quix + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" +) + +type brokerConfig struct { + BootstrapServers string `json:"bootstrap.servers"` + SaslMechanism string `json:"sasl.mechanism"` + SaslUsername string `json:"sasl.username"` + SaslPassword string `json:"sasl.password"` + SecurityProtocol string `json:"security.protocol"` + SSLCertBase64 string `json:"ssl.ca.cert"` + + cert []byte +} + +func (q *Quix) fetchBrokerConfig() (*brokerConfig, error) { + // Create request + endpoint := fmt.Sprintf("%s/workspaces/%s/broker/librdkafka", q.APIURL, q.Workspace) + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, fmt.Errorf("creating request failed: %w", err) + } + + // Setup authentication + token, err := 
q.Token.Get() + if err != nil { + return nil, fmt.Errorf("getting token failed: %w", err) + } + req.Header.Set("Authorization", "Bearer "+token.String()) + req.Header.Set("Accept", "application/json") + token.Destroy() + + // Query the broker configuration from the Quix API + client, err := q.HTTPClientConfig.CreateClient(context.Background(), q.Log) + if err != nil { + return nil, fmt.Errorf("creating client failed: %w", err) + } + defer client.CloseIdleConnections() + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("executing request failed: %w", err) + } + defer resp.Body.Close() + + // Read the body as we need it both in case of an error as well as for + // decoding the config in case of success + body, err := io.ReadAll(resp.Body) + if err != nil { + q.Log.Errorf("Reading message body failed: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected response %q (%d): %s", + http.StatusText(resp.StatusCode), + resp.StatusCode, + string(body), + ) + } + + // Decode the broker and the returned certificate + var cfg brokerConfig + if err := json.Unmarshal(body, &cfg); err != nil { + return nil, fmt.Errorf("decoding body failed: %w", err) + } + + cert, err := base64.StdEncoding.DecodeString(cfg.SSLCertBase64) + if err != nil { + return nil, fmt.Errorf("decoding certificate failed: %w", err) + } + cfg.cert = cert + + return &cfg, nil +} diff --git a/plugins/outputs/quix/quix.go b/plugins/outputs/quix/quix.go new file mode 100644 index 0000000000000..4ecd43a4f9aee --- /dev/null +++ b/plugins/outputs/quix/quix.go @@ -0,0 +1,169 @@ +//go:generate ../../../tools/readme_config_includer/generator +package quix + +import ( + "crypto/tls" + "crypto/x509" + _ "embed" + "errors" + "fmt" + "strings" + "time" + + "github.com/IBM/sarama" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + common_http "github.com/influxdata/telegraf/plugins/common/http" + common_kafka 
"github.com/influxdata/telegraf/plugins/common/kafka" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/plugins/serializers/json" +) + +//go:embed sample.conf +var sampleConfig string + +type Quix struct { + APIURL string `toml:"url"` + Workspace string `toml:"workspace"` + Topic string `toml:"topic"` + Token config.Secret `toml:"token"` + Log telegraf.Logger `toml:"-"` + common_http.HTTPClientConfig + + producer sarama.SyncProducer + serializer serializers.Serializer + kakfaTopic string +} + +func (*Quix) SampleConfig() string { + return sampleConfig +} + +func (q *Quix) Init() error { + // Set defaults + if q.APIURL == "" { + q.APIURL = "https://portal-api.platform.quix.io" + } + q.APIURL = strings.TrimSuffix(q.APIURL, "/") + + // Check input parameters + if q.Topic == "" { + return errors.New("option 'topic' must be set") + } + if q.Workspace == "" { + return errors.New("option 'workspace' must be set") + } + if q.Token.Empty() { + return errors.New("option 'token' must be set") + } + q.kakfaTopic = q.Workspace + "-" + q.Topic + + // Create a JSON serializer for the output + q.serializer = &json.Serializer{ + TimestampUnits: config.Duration(time.Nanosecond), // Hardcoded nanoseconds precision + } + + return nil +} + +func (q *Quix) Connect() error { + // Fetch the Kafka broker configuration from the Quix HTTP endpoint + quixConfig, err := q.fetchBrokerConfig() + if err != nil { + return fmt.Errorf("fetching broker config failed: %w", err) + } + brokers := strings.Split(quixConfig.BootstrapServers, ",") + if len(brokers) == 0 { + return errors.New("no brokers received") + } + + // Setup the Kakfa producer config + cfg := sarama.NewConfig() + cfg.Producer.Return.Successes = true + + switch quixConfig.SecurityProtocol { + case "SASL_SSL": + cfg.Net.SASL.Enable = true + cfg.Net.SASL.User = quixConfig.SaslUsername + cfg.Net.SASL.Password = quixConfig.SaslPassword + 
cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256 + cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &common_kafka.XDGSCRAMClient{HashGeneratorFcn: common_kafka.SHA256} + } + + switch quixConfig.SaslMechanism { + case "SCRAM-SHA-512": + cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &common_kafka.XDGSCRAMClient{HashGeneratorFcn: common_kafka.SHA512} + } + cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 + case "SCRAM-SHA-256": + cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256 + cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &common_kafka.XDGSCRAMClient{HashGeneratorFcn: common_kafka.SHA256} + } + case "PLAIN": + cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext + default: + return fmt.Errorf("unsupported SASL mechanism: %s", quixConfig.SaslMechanism) + } + + // Certificate + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(quixConfig.cert) { + return errors.New("appending CA cert to pool failed") + } + cfg.Net.TLS.Enable = true + cfg.Net.TLS.Config = &tls.Config{RootCAs: certPool} + case "PLAINTEXT": + // No additional configuration required for plaintext communication + default: + return fmt.Errorf("unsupported security protocol: %s", quixConfig.SecurityProtocol) + } + + // Setup the Kakfa producer itself + producer, err := sarama.NewSyncProducer(brokers, cfg) + if err != nil { + return fmt.Errorf("creating producer failed: %w", err) + } + q.producer = producer + + return nil +} + +func (q *Quix) Write(metrics []telegraf.Metric) error { + for _, m := range metrics { + serialized, err := q.serializer.Serialize(m) + if err != nil { + q.Log.Errorf("Error serializing metric: %v", err) + continue + } + + msg := &sarama.ProducerMessage{ + Topic: q.kakfaTopic, + Value: sarama.ByteEncoder(serialized), + Timestamp: m.Time(), + Key: sarama.StringEncoder("telegraf"), + } + + if _, _, err = q.producer.SendMessage(msg); err != nil { + q.Log.Errorf("Error sending 
message to Kafka: %v", err) + continue + } + } + + return nil +} + +func (q *Quix) Close() error { + if q.producer != nil { + return q.producer.Close() + } + return nil +} + +func init() { + outputs.Add("quix", func() telegraf.Output { return &Quix{} }) +} diff --git a/plugins/outputs/quix/quix_test.go b/plugins/outputs/quix/quix_test.go new file mode 100644 index 0000000000000..00726b3e6ff26 --- /dev/null +++ b/plugins/outputs/quix/quix_test.go @@ -0,0 +1,180 @@ +package quix + +import ( + "context" + "crypto/rand" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + kafkacontainer "github.com/testcontainers/testcontainers-go/modules/kafka" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +func TestMissingTopic(t *testing.T) { + plugin := &Quix{} + require.ErrorContains(t, plugin.Init(), "option 'topic' must be set") +} + +func TestMissingWorkspace(t *testing.T) { + plugin := &Quix{Topic: "foo"} + require.ErrorContains(t, plugin.Init(), "option 'workspace' must be set") +} + +func TestMissingToken(t *testing.T) { + plugin := &Quix{Topic: "foo", Workspace: "bar"} + require.ErrorContains(t, plugin.Init(), "option 'token' must be set") +} + +func TestDefaultURL(t *testing.T) { + plugin := &Quix{ + Topic: "foo", + Workspace: "bar", + Token: config.NewSecret([]byte("secret")), + } + require.NoError(t, plugin.Init()) + require.Equal(t, "https://portal-api.platform.quix.io", plugin.APIURL) +} + +func TestFetchingConfig(t *testing.T) { + // Setup HTTP test-server for providing the broker config + brokerCfg := []byte(` + { + "bootstrap.servers":"servers", + "sasl.mechanism":"mechanism", + "sasl.username":"user", + "sasl.password":"password", + "security.protocol":"protocol", + "ssl.ca.cert":"Y2VydA==" + } + `) + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path 
!= "/workspaces/bar/broker/librdkafka" { + w.WriteHeader(http.StatusNotFound) + return + } + if r.Header.Get("Authorization") != "Bearer bXkgc2VjcmV0" { + w.WriteHeader(http.StatusUnauthorized) + return + } + if r.Header.Get("Accept") != "application/json" { + w.WriteHeader(http.StatusUnsupportedMediaType) + return + } + if _, err := w.Write(brokerCfg); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } + }), + ) + defer server.Close() + + // Setup the plugin and fetch the config + plugin := &Quix{ + APIURL: server.URL, + Topic: "foo", + Workspace: "bar", + Token: config.NewSecret([]byte("bXkgc2VjcmV0")), + } + require.NoError(t, plugin.Init()) + + // Check the config + expected := &brokerConfig{ + BootstrapServers: "servers", + SaslMechanism: "mechanism", + SaslUsername: "user", + SaslPassword: "password", + SecurityProtocol: "protocol", + SSLCertBase64: "Y2VydA==", + cert: []byte("cert"), + } + cfg, err := plugin.fetchBrokerConfig() + require.NoError(t, err) + require.Equal(t, expected, cfg) +} + +func TestConnectAndWriteIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + // Setup common config params + workspace := "test" + topic := "telegraf" + + // Setup a kafka container + ctx := context.Background() + kafkaContainer, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0") + require.NoError(t, err) + defer kafkaContainer.Terminate(ctx) //nolint:errcheck // ignored + + brokers, err := kafkaContainer.Brokers(ctx) + require.NoError(t, err) + + // Setup broker config distributed via HTTP + brokerCfg := &brokerConfig{ + BootstrapServers: strings.Join(brokers, ","), + SecurityProtocol: "PLAINTEXT", + } + response, err := json.Marshal(brokerCfg) + require.NoError(t, err) + + // Setup authentication + signingKey := make([]byte, 64) + _, err = rand.Read(signingKey) + require.NoError(t, err) + + tokenRaw := jwt.NewWithClaims(jwt.SigningMethodHS256, 
&jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Minute)), + Issuer: "quix test", + }) + token, err := tokenRaw.SignedString(signingKey) + require.NoError(t, err) + + // Setup HTTP test-server for providing the broker config + path := "/workspaces/" + workspace + "/broker/librdkafka" + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != path { + w.WriteHeader(http.StatusNotFound) + t.Logf("invalid path %q", r.URL.Path) + return + } + if r.Header.Get("Authorization") != "Bearer "+token { + w.WriteHeader(http.StatusUnauthorized) + return + } + if r.Header.Get("Accept") != "application/json" { + w.WriteHeader(http.StatusUnsupportedMediaType) + return + } + if _, err := w.Write(response); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } + }), + ) + defer server.Close() + + // Setup the plugin and establish connection + plugin := &Quix{ + APIURL: server.URL, + Workspace: workspace, + Topic: topic, + Token: config.NewSecret([]byte(token)), + } + require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Connect()) + defer plugin.Close() + + // Verify that we can successfully write data to the kafka broker + require.NoError(t, plugin.Write(testutil.MockMetrics())) +} diff --git a/plugins/outputs/quix/sample.conf b/plugins/outputs/quix/sample.conf new file mode 100644 index 0000000000000..4196b24cc9370 --- /dev/null +++ b/plugins/outputs/quix/sample.conf @@ -0,0 +1,14 @@ +# Send metrics to a Quix data processing pipeline +[[outputs.quix]] + ## Endpoint for providing the configuration + # url = "https://portal-api.platform.quix.io" + + ## Workspace and topics to send the metrics to + workspace = "your_workspace" + topic = "your_topic" + + ## Authentication token created in Quix + token = "your_auth_token" + + ## Amount of time allowed to complete the HTTP request for fetching the config + # timeout = "5s" \ No newline at end of file From 
e5ae35f9080f21e07f61f38236856ff15b1c35fa Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Mon, 9 Dec 2024 15:52:22 +0100 Subject: [PATCH 058/170] fix(outputs.quix): Replace deprecated serializer type --- plugins/outputs/quix/quix.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/outputs/quix/quix.go b/plugins/outputs/quix/quix.go index 4ecd43a4f9aee..ddfc402e46a66 100644 --- a/plugins/outputs/quix/quix.go +++ b/plugins/outputs/quix/quix.go @@ -17,7 +17,6 @@ import ( common_http "github.com/influxdata/telegraf/plugins/common/http" common_kafka "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/json" ) @@ -33,7 +32,7 @@ type Quix struct { common_http.HTTPClientConfig producer sarama.SyncProducer - serializer serializers.Serializer + serializer telegraf.Serializer kakfaTopic string } From 14eb97aa7e995c9a973c0dd6ce0d8be407667b40 Mon Sep 17 00:00:00 2001 From: Mingyang Zheng Date: Mon, 9 Dec 2024 07:32:34 -0800 Subject: [PATCH 059/170] fix(logging): Fix deplicated prefix+attrMsg in log message when redirectLogger is used (#16274) --- logger/handler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logger/handler.go b/logger/handler.go index 76bf64a9c32d8..f967ba2561a96 100644 --- a/logger/handler.go +++ b/logger/handler.go @@ -122,7 +122,7 @@ func (l *redirectLogger) Print(level telegraf.LogLevel, ts time.Time, prefix str attrMsg = "(" + strings.Join(parts, ",") + ")" } - msg := []interface{}{ts.In(time.UTC).Format(time.RFC3339), level.Indicator(), prefix + attrMsg} + msg := []interface{}{ts.In(time.UTC).Format(time.RFC3339), level.Indicator()} if prefix+attrMsg != "" { msg = append(msg, prefix+attrMsg) } From e713360c69692f461290e077439dbd19fa9fca49 Mon Sep 17 00:00:00 2001 From: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> Date: Mon, 9 Dec 2024 
12:20:25 -0600 Subject: [PATCH 060/170] chore(inputs.prometheus): Improve label and field selector logging (#16228) --- plugins/inputs/prometheus/prometheus.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 1922407fd754e..191d27dd29a58 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -183,7 +183,12 @@ func (p *Prometheus) Init() error { return fmt.Errorf("the field selector %q is not supported for pods", invalidSelector) } - p.Log.Infof("Using the label selector: %v and field selector: %v", p.podLabelSelector, p.podFieldSelector) + if p.KubernetesLabelSelector != "" { + p.Log.Debugf("Using the label selector: %v", p.podLabelSelector) + } + if p.KubernetesFieldSelector != "" { + p.Log.Debugf("Using the field selector: %v", p.podFieldSelector) + } for k, vs := range p.NamespaceAnnotationPass { tagFilter := models.TagFilter{} From fa5da91db65564b16b64732538d0732b42be3ffd Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Mon, 9 Dec 2024 19:32:49 +0100 Subject: [PATCH 061/170] Update build version to 1.34.0 --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index f0fed9186c9ff..2404d9590220e 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.33.0 \ No newline at end of file +1.34.0 \ No newline at end of file From 316f2d91fba98dd0bd9745bf35f6854a52a1572d Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Mon, 9 Dec 2024 19:38:55 +0100 Subject: [PATCH 062/170] Update changelog for v1.33.0 (cherry picked from commit 971e9e2631f2f8073a3c5b76225068f66eca81d5) --- CHANGELOG.md | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ceae796d6ef68..a022cbf4d0dac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,66 @@ # Changelog +## 
v1.33.0 [2024-12-09] + +### New Plugins + +- [#15754](https://github.com/influxdata/telegraf/pull/15754) `inputs.neoom_beaam` Add new plugin +- [#15869](https://github.com/influxdata/telegraf/pull/15869) `processors.batch` Add batch processor +- [#16144](https://github.com/influxdata/telegraf/pull/16144) `outputs.quix` Add plugin + +### Features + +- [#16010](https://github.com/influxdata/telegraf/pull/16010) `agent` Add --watch-interval option for polling config changes +- [#15948](https://github.com/influxdata/telegraf/pull/15948) `aggregators.basicstats` Add first field +- [#15891](https://github.com/influxdata/telegraf/pull/15891) `common.socket` Allow parallel parsing with a pool of workers +- [#16141](https://github.com/influxdata/telegraf/pull/16141) `inputs.amqp_consumer` Allow specification of queue arguments +- [#15950](https://github.com/influxdata/telegraf/pull/15950) `inputs.diskio` Add field io await and util +- [#15919](https://github.com/influxdata/telegraf/pull/15919) `inputs.kafka_consumer` Implement startup error behavior options +- [#15910](https://github.com/influxdata/telegraf/pull/15910) `inputs.memcached` Add support for external-store metrics +- [#15990](https://github.com/influxdata/telegraf/pull/15990) `inputs.mock` Add sine phase +- [#16040](https://github.com/influxdata/telegraf/pull/16040) `inputs.modbus` Allow grouping across register types +- [#15865](https://github.com/influxdata/telegraf/pull/15865) `inputs.prometheus` Allow to use secrets for credentials +- [#16230](https://github.com/influxdata/telegraf/pull/16230) `inputs.smart` Add Power on Hours and Cycle Count +- [#15935](https://github.com/influxdata/telegraf/pull/15935) `inputs.snmp` Add displayhint conversion +- [#16027](https://github.com/influxdata/telegraf/pull/16027) `inputs.snmp` Convert uneven bytes to int +- [#15976](https://github.com/influxdata/telegraf/pull/15976) `inputs.socket_listener` Use reception time as timestamp +- 
[#15853](https://github.com/influxdata/telegraf/pull/15853) `inputs.statsd` Allow reporting sets and timings count as floats +- [#11591](https://github.com/influxdata/telegraf/pull/11591) `inputs.vsphere` Add VM memory configuration +- [#16109](https://github.com/influxdata/telegraf/pull/16109) `inputs.vsphere` Add cpu temperature field +- [#15917](https://github.com/influxdata/telegraf/pull/15917) `inputs` Add option to choose the metric time source +- [#16242](https://github.com/influxdata/telegraf/pull/16242) `logging` Allow overriding message key for structured logging +- [#15742](https://github.com/influxdata/telegraf/pull/15742) `outputs.influxdb_v2` Add rate limit implementation +- [#15943](https://github.com/influxdata/telegraf/pull/15943) `outputs.mqtt` Add sprig functions for topic name generator +- [#16041](https://github.com/influxdata/telegraf/pull/16041) `outputs.postgresql` Allow limiting of column name length +- [#16258](https://github.com/influxdata/telegraf/pull/16258) `outputs` Add rate-limiting infrastructure +- [#16146](https://github.com/influxdata/telegraf/pull/16146) `outputs` Implement partial write errors +- [#15883](https://github.com/influxdata/telegraf/pull/15883) `outputs` Only copy metric if its not filtered out +- [#15893](https://github.com/influxdata/telegraf/pull/15893) `serializers.prometheusremotewrite` Log metric conversion errors + +### Bugfixes + +- [#16248](https://github.com/influxdata/telegraf/pull/16248) `inputs.netflow` Decode flags in TCP and IP headers correctly +- [#16257](https://github.com/influxdata/telegraf/pull/16257) `inputs.procstat` Handle running processes correctly across multiple filters +- [#16219](https://github.com/influxdata/telegraf/pull/16219) `logging` Add Close() func for redirectLogger +- [#16255](https://github.com/influxdata/telegraf/pull/16255) `logging` Clean up extra empty spaces when redirectLogger is used +- [#16274](https://github.com/influxdata/telegraf/pull/16274) `logging` Fix duplicated 
prefix and attrMsg in log message when redirectLogger is used + +### Dependency Updates + +- [#16232](https://github.com/influxdata/telegraf/pull/16232) `deps` Bump cloud.google.com/go/bigquery from 1.63.1 to 1.64.0 +- [#16235](https://github.com/influxdata/telegraf/pull/16235) `deps` Bump cloud.google.com/go/storage from 1.43.0 to 1.47.0 +- [#16198](https://github.com/influxdata/telegraf/pull/16198) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.42.2 to 1.43.1 +- [#16234](https://github.com/influxdata/telegraf/pull/16234) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.29.3 to 1.32.6 +- [#16201](https://github.com/influxdata/telegraf/pull/16201) `deps` Bump github.com/intel/powertelemetry from 1.0.1 to 1.0.2 +- [#16200](https://github.com/influxdata/telegraf/pull/16200) `deps` Bump github.com/rclone/rclone from 1.68.1 to 1.68.2 +- [#16199](https://github.com/influxdata/telegraf/pull/16199) `deps` Bump github.com/vishvananda/netns from 0.0.4 to 0.0.5 +- [#16236](https://github.com/influxdata/telegraf/pull/16236) `deps` Bump golang.org/x/net from 0.30.0 to 0.31.0 +- [#16250](https://github.com/influxdata/telegraf/pull/16250) `deps` Bump golangci-lint from v1.62.0 to v1.62.2 +- [#16233](https://github.com/influxdata/telegraf/pull/16233) `deps` Bump google.golang.org/grpc from 1.67.1 to 1.68.0 +- [#16202](https://github.com/influxdata/telegraf/pull/16202) `deps` Bump modernc.org/sqlite from 1.33.1 to 1.34.1 +- [#16203](https://github.com/influxdata/telegraf/pull/16203) `deps` Bump super-linter/super-linter from 7.1.0 to 7.2.0 + ## v1.32.3 [2024-11-18] ### Important Changes From 6d28c389e4652241a14ac755cb693db4e0595cb2 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Tue, 10 Dec 2024 11:22:30 +0800 Subject: [PATCH 063/170] feat(outputs): Fix go mod error --- go.sum | 8 -------- 1 file changed, 8 deletions(-) diff --git a/go.sum b/go.sum index 8b87a9e19d693..b9102cd078595 100644 --- a/go.sum +++ b/go.sum @@ -2561,8 +2561,6 @@ 
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2718,8 +2716,6 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2908,8 +2904,6 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2930,8 +2924,6 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From de17b2fb46e01b2588575e23098627f7f4f29f06 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Tue, 10 Dec 2024 11:35:03 +0800 Subject: [PATCH 064/170] feat(outputs): Fix serializer type error --- plugins/outputs/inlong/inlong.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index 004e1db835cd0..eb315eee77cb1 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -10,7 +10,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -26,14 +25,14 @@ type Inlong struct { producerFunc func(groupId string, managerUrl string) (dataproxy.Client, error) producer dataproxy.Client - serializer serializers.Serializer + serializer 
telegraf.Serializer } func (i *Inlong) SampleConfig() string { return sampleConfig } -func (i *Inlong) SetSerializer(serializer serializers.Serializer) { +func (i *Inlong) SetSerializer(serializer telegraf.Serializer) { i.serializer = serializer } From a3bd746876c401fd805ee327ce855433c752368d Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 10 Dec 2024 16:24:36 +0100 Subject: [PATCH 065/170] chore: Update link to release calendar (#16278) --- docs/FAQ.md | 2 +- docs/RELEASES.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/FAQ.md b/docs/FAQ.md index 044e68ab4545c..4016ebc97b9b4 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -15,7 +15,7 @@ new features are held for the next minor release. Users can view what [GitHub milestones][] a PR belongs to to determine the release it will go out with. -[Google Calendar]: https://calendar.google.com/calendar/embed?src=c_1ikq7u4f5c4o6mh9ep4duo3avk%40group.calendar.google.com +[Google Calendar]: https://calendar.google.com/calendar/embed?src=c_03d981cefd8d6432894cb162da5c6186e393bc0f970ca6c371201aa05d30d763%40group.calendar.google.com [GitHub milestones]: https://github.com/influxdata/telegraf/milestones ## How can I filter or select specific metrics? diff --git a/docs/RELEASES.md b/docs/RELEASES.md index 9b0b1a26ab7e5..233cb2acc0907 100644 --- a/docs/RELEASES.md +++ b/docs/RELEASES.md @@ -19,5 +19,5 @@ new features are held for the next minor release. Users can view what [GitHub milestones][] a PR belongs to when they want to determine the release it will go out with. 
-[Google Calendar]: https://calendar.google.com/calendar/embed?src=c_1ikq7u4f5c4o6mh9ep4duo3avk%40group.calendar.google.com +[Google Calendar]: https://calendar.google.com/calendar/embed?src=c_03d981cefd8d6432894cb162da5c6186e393bc0f970ca6c371201aa05d30d763%40group.calendar.google.com [GitHub milestones]: https://github.com/influxdata/telegraf/milestones From 7b209a11c33fbcf702d90008ca39c2e996fa103c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:25:05 -0600 Subject: [PATCH 066/170] chore(deps): Bump github.com/aws/aws-sdk-go-v2/config from 1.27.39 to 1.28.6 (#16280) --- go.mod | 22 +++++++++++----------- go.sum | 44 ++++++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/go.mod b/go.mod index 1c2ca658754f1..e55e46c3ef617 100644 --- a/go.mod +++ b/go.mod @@ -44,16 +44,16 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/awnumar/memguard v0.22.5 - github.com/aws/aws-sdk-go-v2 v1.32.5 - github.com/aws/aws-sdk-go-v2/config v1.27.39 - github.com/aws/aws-sdk-go-v2/credentials v1.17.44 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 + github.com/aws/aws-sdk-go-v2 v1.32.6 + github.com/aws/aws-sdk-go-v2/config v1.28.6 + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.43.1 github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.36.2 github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1 github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.6 - github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4 github.com/aws/smithy-go v1.22.1 github.com/benbjohnson/clock v1.3.5 @@ -284,19 
+284,19 @@ require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect diff --git a/go.sum b/go.sum index d6205d4a2feab..cf3f7beaa2bf0 100644 --- a/go.sum +++ b/go.sum @@ -872,34 +872,34 @@ github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod 
h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAP github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.32.5 h1:U8vdWJuY7ruAkzaOdD7guwJjD06YSKmnKCJs7s3IkIo= -github.com/aws/aws-sdk-go-v2 v1.32.5/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= +github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= -github.com/aws/aws-sdk-go-v2/config v1.27.39 h1:FCylu78eTGzW1ynHcongXK9YHtoXD5AiiUqq3YfJYjU= -github.com/aws/aws-sdk-go-v2/config v1.27.39/go.mod h1:wczj2hbyskP4LjMKBEZwPRO1shXY+GsQleab+ZXT2ik= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= -github.com/aws/aws-sdk-go-v2/credentials v1.17.44 h1:qqfs5kulLUHUEXlHEZXLJkgGoF3kkUeFUTVA585cFpU= -github.com/aws/aws-sdk-go-v2/credentials v1.17.44/go.mod h1:0Lm2YJ8etJdEdw23s+q/9wTpOeo2HhNE97XcRa7T8MA= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= 
+github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7 h1:FZB15YK2h/l2wO9YXvXr7/mZ5uOJIsLNZIePlHarAwg= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7/go.mod h1:xTMr0gSUW6H6nJJVV257wWlk9257DwZ7EFhPFn3itgo= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 h1:woXadbf0c7enQ2UGCi8gW/WuKmE0xIzxBF/eD94jMKQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19/go.mod h1:zminj5ucw7w0r65bP6nhyOd3xL6veAUMc3ElGMoLVb4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayxSX32HDw73Yb6Hh2izDSFxXY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g= github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 h1:4usbeaes3yJnCFC7kfeyhkdkPtoRYPa/hTmCqMpKpLI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24/go.mod h1:5CI1JemjVwde8m2WG3cz23qHKPOxbpkq0HaoreEgLIY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod 
h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 h1:N1zsICrQglfzaBnrfM0Ys00860C+QFwu6u/5+LomP+o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24/go.mod h1:dCn9HbJ8+K31i8IQ8EWmWj0EiIk0+vKiHNMxTTYveAg= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= @@ -919,8 +919,8 @@ github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1/go.mod h1:ifHRXsCyL github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1 h1:2ZzpXgkh4qmsexltvLVIaC4+HdN3oe6OWK6Upc4Qz/0= github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1/go.mod h1:eu3DWRK5GBq4hjCr7nAbnQiHSan5RJ6ue3qQVp5PJs0= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= github.com/aws/aws-sdk-go-v2/service/internal/checksum 
v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE= @@ -928,8 +928,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.2 h1:1G7T github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.2/go.mod h1:+ybYGLXoF7bcD7wIcMcklxyABZQmuBf1cHUhvY6FGIo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 h1:tHxQi/XHPK0ctd/wdOw0t7Xrc2OxcRCnVzv8lwWPu0c= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4/go.mod h1:4GQbF1vJzG60poZqWatZlhP31y8PGCCVTvIGPdaaYJ0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= @@ -939,15 +939,15 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 h1:hT8ZAZRIfqBqHbzKTII+CIiY8G2oC github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 h1:HJwZwRt2Z2Tdec+m+fPjvdmkq2s9Ra+VR0hjF7V2o40= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.5/go.mod h1:wrMCEwjFPms+V86TCQQeOxQF/If4vT44FGIOFiMC2ck= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 h1:zcx9LiGWZ6i6pjdcoE9oXAB6mUdeyC36Ia/QEiIvYdg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4/go.mod h1:Tp/ly1cTjRLGBBmNccFumbZ8oqpZlpdhFf80SrRh4is= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 h1:yDxvkz3/uOKfxnv8YhzOi9m+2OGIxF+on3KOISbK5IU= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.4/go.mod h1:9XEUty5v5UAsMiFOBJrNibZgwCeOma73jgGwwhgffa8= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4 h1:glNNLfVzW88jz83oPZ4gXndJL7VDDANHowCoJU673OU= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4/go.mod h1:VUHrcV1XoUd6ZWzIMal9CeAA2EiKkAhmImuRGhNbaxg= github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= From 0d7d9c54b28ecb05c4b7191726bf08353e949063 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:25:29 -0600 Subject: [PATCH 067/170] chore(deps): Bump 
cloud.google.com/go/monitoring from 1.21.1 to 1.22.0 (#16283) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e55e46c3ef617..a4d9995d13efc 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.23.0 require ( cloud.google.com/go/bigquery v1.64.0 - cloud.google.com/go/monitoring v1.21.1 + cloud.google.com/go/monitoring v1.22.0 cloud.google.com/go/pubsub v1.45.1 cloud.google.com/go/storage v1.47.0 collectd.org v0.6.0 diff --git a/go.sum b/go.sum index cf3f7beaa2bf0..1a4d394f51ae5 100644 --- a/go.sum +++ b/go.sum @@ -387,8 +387,8 @@ cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhI cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= -cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= +cloud.google.com/go/monitoring v1.22.0 h1:mQ0040B7dpuRq1+4YiQD43M2vW9HgoVxY98xhqGT+YI= +cloud.google.com/go/monitoring v1.22.0/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= From 037aea2a372d9ba07911b2aec157963e164d3c84 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:25:50 -0600 Subject: [PATCH 068/170] chore(deps): Bump github.com/nats-io/nats.go from 1.36.0 to 1.37.0 (#16282) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 
deletions(-) diff --git a/go.mod b/go.mod index a4d9995d13efc..e1779b2635906 100644 --- a/go.mod +++ b/go.mod @@ -145,7 +145,7 @@ require ( github.com/moby/ipvs v1.1.0 github.com/multiplay/go-ts3 v1.2.0 github.com/nats-io/nats-server/v2 v2.10.17 - github.com/nats-io/nats.go v1.36.0 + github.com/nats-io/nats.go v1.37.0 github.com/netsampler/goflow2/v2 v2.2.1 github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1 github.com/nsqio/go-nsq v1.1.0 diff --git a/go.sum b/go.sum index 1a4d394f51ae5..897d34883cc02 100644 --- a/go.sum +++ b/go.sum @@ -1956,8 +1956,8 @@ github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c= github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A= github.com/nats-io/nats-server/v2 v2.10.17 h1:PTVObNBD3TZSNUDgzFb1qQsQX4mOgFmOuG9vhT+KBUY= github.com/nats-io/nats-server/v2 v2.10.17/go.mod h1:5OUyc4zg42s/p2i92zbbqXvUNsbF0ivdTLKshVMn2YQ= -github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU= -github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= +github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE= +github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= From 73b41f5e6d12591bfaf64a5a93dee5e069f74623 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:26:14 -0600 Subject: [PATCH 069/170] chore(deps): Bump k8s.io/client-go from 0.30.1 to 0.31.3 (#16281) --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index e1779b2635906..9f8ae22fefcc5 100644 --- a/go.mod +++ b/go.mod @@ -230,9 
+230,9 @@ require ( gopkg.in/olivere/elastic.v5 v5.0.86 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.30.1 - k8s.io/apimachinery v0.31.1 - k8s.io/client-go v0.30.1 + k8s.io/api v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/client-go v0.31.3 layeh.com/radius v0.0.0-20221205141417-e7fbddd11d68 modernc.org/sqlite v1.34.1 ) diff --git a/go.sum b/go.sum index 897d34883cc02..58b901287ccef 100644 --- a/go.sum +++ b/go.sum @@ -3385,12 +3385,12 @@ honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= -k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= From 
e01f5f77cea7c273b0f35ed737928b49bcf874b1 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 10 Dec 2024 22:13:42 +0100 Subject: [PATCH 070/170] fix(outputs.remotefile): Handle tracking metrics correctly (#16289) --- plugins/outputs/remotefile/remotefile.go | 7 +- plugins/outputs/remotefile/remotefile_test.go | 128 ++++++++++++++++++ 2 files changed, 134 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/remotefile/remotefile.go b/plugins/outputs/remotefile/remotefile.go index 9db5c039368d4..a30badf10c6c6 100644 --- a/plugins/outputs/remotefile/remotefile.go +++ b/plugins/outputs/remotefile/remotefile.go @@ -177,7 +177,12 @@ func (f *File) Write(metrics []telegraf.Metric) error { // Group the metrics per output file groups := make(map[string][]telegraf.Metric) - for _, m := range metrics { + for _, raw := range metrics { + m := raw + if wm, ok := raw.(telegraf.UnwrappableMetric); ok { + m = wm.Unwrap() + } + for _, tmpl := range f.templates { buf.Reset() if err := tmpl.Execute(&buf, m); err != nil { diff --git a/plugins/outputs/remotefile/remotefile_test.go b/plugins/outputs/remotefile/remotefile_test.go index 70ba9a919167e..cdd5b51617967 100644 --- a/plugins/outputs/remotefile/remotefile_test.go +++ b/plugins/outputs/remotefile/remotefile_test.go @@ -5,6 +5,7 @@ import ( "os" "path/filepath" "strings" + "sync" "testing" "time" @@ -393,3 +394,130 @@ func TestForgettingFiles(t *testing.T) { require.Len(t, plugin.serializers, 1) require.Contains(t, plugin.serializers, "test-b.csv") } + +func TestTrackingMetrics(t *testing.T) { + // see issue #16045 + inputRaw := []telegraf.Metric{ + metric.New( + "test", + map[string]string{"source": "localhost"}, + map[string]interface{}{"value": 23}, + time.Unix(1719410465, 0), + ), + metric.New( + "test", + map[string]string{"source": "remotehost"}, + map[string]interface{}{"value": 21}, + time.Unix(1719410465, 0), + ), + metric.New( + "test", + map[string]string{"source": 
"localhost"}, + map[string]interface{}{"value": 42}, + time.Unix(1719410485, 0), + ), + metric.New( + "test", + map[string]string{"source": "remotehost"}, + map[string]interface{}{"value": 66}, + time.Unix(1719410485, 0), + ), + metric.New( + "test", + map[string]string{"source": "remotehost"}, + map[string]interface{}{"value": 55}, + time.Unix(1716310124, 0), + ), + metric.New( + "test", + map[string]string{"source": "remotehost"}, + map[string]interface{}{"value": 1}, + time.Unix(1716310174, 0), + ), + } + + // Create tracking metrics as inputs for the test + var mu sync.Mutex + delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw)) + notify := func(di telegraf.DeliveryInfo) { + mu.Lock() + defer mu.Unlock() + delivered = append(delivered, di) + } + input := make([]telegraf.Metric, 0, len(inputRaw)) + for _, m := range inputRaw { + tm, _ := metric.WithTracking(m, notify) + input = append(input, tm) + } + + // Create the expectations + expected := map[string][]string{ + "localhost-2024-06-26": { + "test,source=localhost value=23i 1719410465000000000\n", + "test,source=localhost value=42i 1719410485000000000\n", + }, + "remotehost-2024-06-26": { + "test,source=remotehost value=21i 1719410465000000000\n", + "test,source=remotehost value=66i 1719410485000000000\n", + }, + "remotehost-2024-05-21": { + "test,source=remotehost value=55i 1716310124000000000\n", + "test,source=remotehost value=1i 1716310174000000000\n", + }, + } + + // Prepare the output filesystem + tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + + // Setup the plugin including the serializer + plugin := &File{ + Remote: config.NewSecret([]byte("local:" + tmpdir)), + Files: []string{`{{.Tag "source"}}-{{.Time.Format "2006-01-02"}}`}, + WriteBackInterval: config.Duration(100 * time.Millisecond), + Log: &testutil.Logger{}, + } + + plugin.SetSerializerFunc(func() (telegraf.Serializer, error) { + serializer := &influx.Serializer{} + err 
:= serializer.Init() + return serializer, err + }) + require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Connect()) + defer plugin.Close() + + // Write the metrics and wait for the data to settle to disk + require.NoError(t, plugin.Write(input)) + require.Eventually(t, func() bool { + ok := true + for fn := range expected { + _, err := os.Stat(filepath.Join(tmpdir, fn)) + ok = ok && err == nil + } + return ok + }, 5*time.Second, 100*time.Millisecond) + + // Check the result + for fn, lines := range expected { + tmpfn := filepath.Join(tmpdir, fn) + require.FileExists(t, tmpfn) + + actual, err := os.ReadFile(tmpfn) + require.NoError(t, err) + require.Equal(t, strings.Join(lines, ""), string(actual)) + } + + // Simulate output acknowledging delivery + for _, m := range input { + m.Accept() + } + + // Check delivery + require.Eventuallyf(t, func() bool { + mu.Lock() + defer mu.Unlock() + return len(input) == len(delivered) + }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) +} From bcea9a28c0070b4e959afe25ccdcde2c44c937ae Mon Sep 17 00:00:00 2001 From: Dan Fuchs <330402+fajpunk@users.noreply.github.com> Date: Tue, 10 Dec 2024 15:14:58 -0600 Subject: [PATCH 071/170] feat(parsers.avro): Allow union fields to be specified as tags (#16272) --- plugins/parsers/avro/parser.go | 82 +++++++++++-------- .../testcases/union-nullable-tag/expected.out | 1 + .../testcases/union-nullable-tag/message.json | 14 ++++ .../union-nullable-tag/telegraf.conf | 27 ++++++ 4 files changed, 89 insertions(+), 35 deletions(-) create mode 100644 plugins/parsers/avro/testcases/union-nullable-tag/expected.out create mode 100644 plugins/parsers/avro/testcases/union-nullable-tag/message.json create mode 100644 plugins/parsers/avro/testcases/union-nullable-tag/telegraf.conf diff --git a/plugins/parsers/avro/parser.go b/plugins/parsers/avro/parser.go index 1f9a911f8f61f..6735eeb3499df 100644 --- a/plugins/parsers/avro/parser.go +++ 
b/plugins/parsers/avro/parser.go @@ -180,6 +180,41 @@ func (p *Parser) flattenField(fldName string, fldVal map[string]interface{}) map return ret } +func (p *Parser) flattenItem(fld string, fldVal interface{}) (map[string]interface{}, error) { + sep := flatten.SeparatorStyle{ + Before: "", + Middle: p.FieldSeparator, + After: "", + } + candidate := make(map[string]interface{}) + candidate[fld] = fldVal + + var flat map[string]interface{} + var err error + // Exactly how we flatten is decided by p.UnionMode + if p.UnionMode == "flatten" { + flat, err = flatten.Flatten(candidate, "", sep) + if err != nil { + return nil, fmt.Errorf("flatten candidate %q failed: %w", candidate, err) + } + } else { + // "nullable" or "any" + typedVal, ok := candidate[fld].(map[string]interface{}) + if !ok { + // the "key" is not a string, so ... + // most likely an array? Do the default thing + // and flatten the candidate. + flat, err = flatten.Flatten(candidate, "", sep) + if err != nil { + return nil, fmt.Errorf("flatten candidate %q failed: %w", candidate, err) + } + } else { + flat = p.flattenField(fld, typedVal) + } + } + return flat, nil +} + func (p *Parser) createMetric(data map[string]interface{}, schema string) (telegraf.Metric, error) { // Tags differ from fields, in that tags are inherently strings. // fields can be of any type. @@ -193,12 +228,18 @@ func (p *Parser) createMetric(data map[string]interface{}, schema string) (teleg // Avro doesn't have a Tag/Field distinction, so we have to tell // Telegraf which items are our tags. 
for _, tag := range p.Tags { - sTag, err := internal.ToString(data[tag]) - if err != nil { - p.Log.Warnf("Could not convert %v to string for tag %q: %v", data[tag], tag, err) - continue + flat, flattenErr := p.flattenItem(tag, data[tag]) + if flattenErr != nil { + return nil, fmt.Errorf("flatten tag %q failed: %w", tag, flattenErr) + } + for k, v := range flat { + sTag, stringErr := internal.ToString(v) + if stringErr != nil { + p.Log.Warnf("Could not convert %v to string for tag %q: %v", data[tag], tag, stringErr) + continue + } + tags[k] = sTag } - tags[tag] = sTag } var fieldList []string if len(p.Fields) != 0 { @@ -215,37 +256,8 @@ func (p *Parser) createMetric(data map[string]interface{}, schema string) (teleg } // We need to flatten out our fields. The default (the separator // string is empty) is equivalent to what streamreactor does. - sep := flatten.SeparatorStyle{ - Before: "", - Middle: p.FieldSeparator, - After: "", - } for _, fld := range fieldList { - candidate := make(map[string]interface{}) - candidate[fld] = data[fld] // 1-item map - var flat map[string]interface{} - var err error - // Exactly how we flatten is decided by p.UnionMode - if p.UnionMode == "flatten" { - flat, err = flatten.Flatten(candidate, "", sep) - if err != nil { - return nil, fmt.Errorf("flatten candidate %q failed: %w", candidate, err) - } - } else { - // "nullable" or "any" - typedVal, ok := candidate[fld].(map[string]interface{}) - if !ok { - // the "key" is not a string, so ... - // most likely an array? Do the default thing - // and flatten the candidate. 
- flat, err = flatten.Flatten(candidate, "", sep) - if err != nil { - return nil, fmt.Errorf("flatten candidate %q failed: %w", candidate, err) - } - } else { - flat = p.flattenField(fld, typedVal) - } - } + flat, err := p.flattenItem(fld, data[fld]) if err != nil { return nil, fmt.Errorf("flatten field %q failed: %w", fld, err) } diff --git a/plugins/parsers/avro/testcases/union-nullable-tag/expected.out b/plugins/parsers/avro/testcases/union-nullable-tag/expected.out new file mode 100644 index 0000000000000..b4a55b5081166 --- /dev/null +++ b/plugins/parsers/avro/testcases/union-nullable-tag/expected.out @@ -0,0 +1 @@ +Switch,switch_wwn=10:00:50:EB:1A:0B:84:3A,some_union_in_a_tag=some_value statistics_collection_time=1682509200092i,up_time=1166984904i,memory_utilization=20.0 1682509200092000 diff --git a/plugins/parsers/avro/testcases/union-nullable-tag/message.json b/plugins/parsers/avro/testcases/union-nullable-tag/message.json new file mode 100644 index 0000000000000..413cb9a9f72b0 --- /dev/null +++ b/plugins/parsers/avro/testcases/union-nullable-tag/message.json @@ -0,0 +1,14 @@ +{ + "some_union_in_a_tag": { + "string": "some_value" + }, + "switch_wwn": "10:00:50:EB:1A:0B:84:3A", + "statistics_collection_time": 1682509200092, + "up_time": 1166984904, + "cpu_utilization": { + "null": null + }, + "memory_utilization": { + "float": 20.0 + } +} diff --git a/plugins/parsers/avro/testcases/union-nullable-tag/telegraf.conf b/plugins/parsers/avro/testcases/union-nullable-tag/telegraf.conf new file mode 100644 index 0000000000000..1e6b92bbc6ac7 --- /dev/null +++ b/plugins/parsers/avro/testcases/union-nullable-tag/telegraf.conf @@ -0,0 +1,27 @@ +[[ inputs.file ]] + files = ["./testcases/union-nullable-tag/message.json"] + data_format = "avro" + + avro_format = "json" + avro_measurement = "Switch" + avro_tags = ["switch_wwn", "some_union_in_a_tag"] + avro_fields = ["up_time", "cpu_utilization", "memory_utilization", "statistics_collection_time"] + avro_timestamp = 
"statistics_collection_time" + avro_timestamp_format = "unix_ms" + avro_union_mode = "nullable" + avro_schema = ''' + { + "namespace": "com.brocade.streaming", + "name": "fibrechannel_switch_statistics", + "type": "record", + "version": "1", + "fields": [ + {"name": "some_union_in_a_tag", "type": ["null", "string"], "default": null, "doc": "Some union that is used in a tag"}, + {"name": "switch_wwn", "type": "string", "doc": "WWN of the Physical Switch."}, + {"name": "statistics_collection_time", "type": "long", "doc": "Epoch time when statistics is collected."}, + {"name": "up_time", "type": "long", "doc": "Switch Up Time (in hundredths of a second)"}, + {"name": "cpu_utilization", "type": ["null","float"], "default": null, "doc": "CPU Utilization in %"}, + {"name": "memory_utilization", "type": ["null", "float"], "default": null, "doc": "Memory Utilization in %"} + ] + } + ''' From 05435e47d325a8fd4bd2d49655650323fb060973 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 11 Dec 2024 21:25:03 +0100 Subject: [PATCH 072/170] chore(inputs.kinesis_consumer): Cleanup code (#16267) Co-authored-by: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> --- plugins/inputs/kinesis_consumer/encoding.go | 45 +++ .../kinesis_consumer/kinesis_consumer.go | 250 ++++++---------- .../kinesis_consumer/kinesis_consumer_test.go | 277 +++++++----------- plugins/inputs/kinesis_consumer/logging.go | 27 ++ plugins/inputs/kinesis_consumer/noop_store.go | 7 + 5 files changed, 274 insertions(+), 332 deletions(-) create mode 100644 plugins/inputs/kinesis_consumer/encoding.go create mode 100644 plugins/inputs/kinesis_consumer/logging.go create mode 100644 plugins/inputs/kinesis_consumer/noop_store.go diff --git a/plugins/inputs/kinesis_consumer/encoding.go b/plugins/inputs/kinesis_consumer/encoding.go new file mode 100644 index 0000000000000..d2bad6fd8301d --- /dev/null +++ b/plugins/inputs/kinesis_consumer/encoding.go @@ -0,0 +1,45 @@ 
+package kinesis_consumer + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "fmt" + "io" +) + +type decodingFunc func([]byte) ([]byte, error) + +func processGzip(data []byte) ([]byte, error) { + zipData, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer zipData.Close() + return io.ReadAll(zipData) +} + +func processZlib(data []byte) ([]byte, error) { + zlibData, err := zlib.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer zlibData.Close() + return io.ReadAll(zlibData) +} + +func processNoOp(data []byte) ([]byte, error) { + return data, nil +} + +func getDecodingFunc(encoding string) (decodingFunc, error) { + switch encoding { + case "gzip": + return processGzip, nil + case "zlib": + return processZlib, nil + case "none", "identity", "": + return processNoOp, nil + } + return nil, fmt.Errorf("unknown content encoding %q", encoding) +} diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 819a36b0a33da..4c65aadef41fa 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -2,23 +2,15 @@ package kinesis_consumer import ( - "bytes" - "compress/gzip" - "compress/zlib" "context" _ "embed" "errors" - "fmt" - "io" - "math/big" - "strings" "sync" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dynamodb" "github.com/aws/aws-sdk-go-v2/service/kinesis" - "github.com/aws/smithy-go/logging" consumer "github.com/harlow/kinesis-consumer" "github.com/harlow/kinesis-consumer/store/ddb" @@ -31,86 +23,85 @@ import ( //go:embed sample.conf var sampleConfig string -var ( - once sync.Once - // this is the largest sequence number allowed - https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SequenceNumberRange.html - maxSeq = strToBint(strings.Repeat("9", 129)) - negOne *big.Int -) - -const ( - defaultMaxUndeliveredMessages = 
1000 -) - -type ( - KinesisConsumer struct { - StreamName string `toml:"streamname"` - ShardIteratorType string `toml:"shard_iterator_type"` - DynamoDB *dynamoDB `toml:"checkpoint_dynamodb"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - ContentEncoding string `toml:"content_encoding"` - - Log telegraf.Logger `toml:"-"` - - cons *consumer.Consumer - parser telegraf.Parser - cancel context.CancelFunc - acc telegraf.TrackingAccumulator - sem chan struct{} +var once sync.Once + +type KinesisConsumer struct { + StreamName string `toml:"streamname"` + ShardIteratorType string `toml:"shard_iterator_type"` + DynamoDB *dynamoDB `toml:"checkpoint_dynamodb"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + ContentEncoding string `toml:"content_encoding"` + Log telegraf.Logger `toml:"-"` + common_aws.CredentialConfig + + cons *consumer.Consumer + parser telegraf.Parser + cancel context.CancelFunc + acc telegraf.TrackingAccumulator + sem chan struct{} + + checkpoint consumer.Store + checkpoints map[string]checkpoint + records map[telegraf.TrackingID]string + checkpointTex sync.Mutex + recordsTex sync.Mutex + wg sync.WaitGroup + + contentDecodingFunc decodingFunc + + lastSeqNum string +} - checkpoint consumer.Store - checkpoints map[string]checkpoint - records map[telegraf.TrackingID]string - checkpointTex sync.Mutex - recordsTex sync.Mutex - wg sync.WaitGroup +type dynamoDB struct { + AppName string `toml:"app_name"` + TableName string `toml:"table_name"` +} - processContentEncodingFunc processContent +type checkpoint struct { + streamName string + shardID string +} - lastSeqNum *big.Int +func (*KinesisConsumer) SampleConfig() string { + return sampleConfig +} - common_aws.CredentialConfig +func (k *KinesisConsumer) Init() error { + // Set defaults + if k.MaxUndeliveredMessages < 1 { + k.MaxUndeliveredMessages = 1000 } - dynamoDB struct { - AppName string `toml:"app_name"` - TableName string `toml:"table_name"` + if k.ShardIteratorType == "" { 
+ k.ShardIteratorType = "TRIM_HORIZON" } - - checkpoint struct { - streamName string - shardID string + if k.ContentEncoding == "" { + k.ContentEncoding = "identity" } -) -type processContent func([]byte) ([]byte, error) - -func (*KinesisConsumer) SampleConfig() string { - return sampleConfig -} + f, err := getDecodingFunc(k.ContentEncoding) + if err != nil { + return err + } + k.contentDecodingFunc = f -func (k *KinesisConsumer) Init() error { - return k.configureProcessContentEncodingFunc() + return nil } func (k *KinesisConsumer) SetParser(parser telegraf.Parser) { k.parser = parser } -func (k *KinesisConsumer) Start(ac telegraf.Accumulator) error { - err := k.connect(ac) - if err != nil { - return err - } - - return nil +func (k *KinesisConsumer) Start(acc telegraf.Accumulator) error { + return k.connect(acc) } func (k *KinesisConsumer) Gather(acc telegraf.Accumulator) error { if k.cons == nil { return k.connect(acc) } - k.lastSeqNum = maxSeq + // Enforce writing of last received sequence number + k.lastSeqNum = "" return nil } @@ -138,7 +129,7 @@ func (k *KinesisConsumer) SetCheckpoint(streamName, shardID, sequenceNumber stri return nil } -func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { +func (k *KinesisConsumer) connect(acc telegraf.Accumulator) error { cfg, err := k.CredentialConfig.Credentials() if err != nil { return err @@ -180,7 +171,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { k.cons = cons - k.acc = ac.WithTracking(k.MaxUndeliveredMessages) + k.acc = acc.WithTracking(k.MaxUndeliveredMessages) k.records = make(map[telegraf.TrackingID]string, k.MaxUndeliveredMessages) k.checkpoints = make(map[string]checkpoint, k.MaxUndeliveredMessages) k.sem = make(chan struct{}, k.MaxUndeliveredMessages) @@ -204,8 +195,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { case k.sem <- struct{}{}: break } - err := k.onMessage(k.acc, r) - if err != nil { + if err := k.onMessage(k.acc, r); err != nil { 
<-k.sem k.Log.Errorf("Scan parser error: %v", err) } @@ -223,7 +213,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { } func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, r *consumer.Record) error { - data, err := k.processContentEncodingFunc(r.Data) + data, err := k.contentDecodingFunc(r.Data) if err != nil { return err } @@ -262,111 +252,37 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) { delete(k.records, info.ID()) k.recordsTex.Unlock() - if info.Delivered() { - k.checkpointTex.Lock() - chk, ok := k.checkpoints[sequenceNum] - if !ok { - k.checkpointTex.Unlock() - continue - } - delete(k.checkpoints, sequenceNum) - k.checkpointTex.Unlock() - - // at least once - if strToBint(sequenceNum).Cmp(k.lastSeqNum) > 0 { - continue - } - - k.lastSeqNum = strToBint(sequenceNum) - if err := k.checkpoint.SetCheckpoint(chk.streamName, chk.shardID, sequenceNum); err != nil { - k.Log.Debugf("Setting checkpoint failed: %v", err) - } - } else { + if !info.Delivered() { k.Log.Debug("Metric group failed to process") + continue } - } - } -} -func processGzip(data []byte) ([]byte, error) { - zipData, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } - defer zipData.Close() - return io.ReadAll(zipData) -} - -func processZlib(data []byte) ([]byte, error) { - zlibData, err := zlib.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } - defer zlibData.Close() - return io.ReadAll(zlibData) -} - -func processNoOp(data []byte) ([]byte, error) { - return data, nil -} - -func strToBint(s string) *big.Int { - n, ok := new(big.Int).SetString(s, 10) - if !ok { - return negOne - } - return n -} - -func (k *KinesisConsumer) configureProcessContentEncodingFunc() error { - switch k.ContentEncoding { - case "gzip": - k.processContentEncodingFunc = processGzip - case "zlib": - k.processContentEncodingFunc = processZlib - case "none", "identity", "": - k.processContentEncodingFunc = processNoOp 
- default: - return fmt.Errorf("unknown content encoding %q", k.ContentEncoding) - } - return nil -} - -type telegrafLoggerWrapper struct { - telegraf.Logger -} + if k.lastSeqNum != "" { + continue + } -func (t *telegrafLoggerWrapper) Log(args ...interface{}) { - t.Trace(args...) -} + // Store the sequence number at least once per gather cycle using the checkpoint + // storage (usually DynamoDB). + k.checkpointTex.Lock() + chk, ok := k.checkpoints[sequenceNum] + if !ok { + k.checkpointTex.Unlock() + continue + } + delete(k.checkpoints, sequenceNum) + k.checkpointTex.Unlock() -func (t *telegrafLoggerWrapper) Logf(classification logging.Classification, format string, v ...interface{}) { - switch classification { - case logging.Debug: - format = "DEBUG " + format - case logging.Warn: - format = "WARN" + format - default: - format = "INFO " + format + k.Log.Tracef("persisting sequence number %q for stream %q and shard %q", sequenceNum) + k.lastSeqNum = sequenceNum + if err := k.checkpoint.SetCheckpoint(chk.streamName, chk.shardID, sequenceNum); err != nil { + k.Log.Errorf("Setting checkpoint failed: %v", err) + } + } } - t.Logger.Tracef(format, v...) 
} -// noopStore implements the storage interface with discard -type noopStore struct{} - -func (n noopStore) SetCheckpoint(_, _, _ string) error { return nil } -func (n noopStore) GetCheckpoint(_, _ string) (string, error) { return "", nil } - func init() { - negOne, _ = new(big.Int).SetString("-1", 10) - inputs.Add("kinesis_consumer", func() telegraf.Input { - return &KinesisConsumer{ - ShardIteratorType: "TRIM_HORIZON", - MaxUndeliveredMessages: defaultMaxUndeliveredMessages, - lastSeqNum: maxSeq, - ContentEncoding: "identity", - } + return &KinesisConsumer{} }) } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go index e09e0df3717a6..b48372571b879 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go @@ -14,220 +14,167 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func TestKinesisConsumer_onMessage(t *testing.T) { +func TestInvalidCoding(t *testing.T) { + plugin := &KinesisConsumer{ + ContentEncoding: "notsupported", + } + require.ErrorContains(t, plugin.Init(), "unknown content encoding") +} + +func TestOnMessage(t *testing.T) { + // Prepare messages zlibBytpes, err := base64.StdEncoding.DecodeString( "eF5FjlFrgzAUhf9KuM+2aNB2zdsQ2xe3whQGW8qIeqdhaiSJK0P874u1Y4+Hc/jON0GHxoga858BgUF8fs5fzunHU5Jlj6cEPFDXHvXStGqsrsKWTapq44pW1SetxsF1a8qsRtGt0Yy" + "FKbUcrFT9UbYWtQH2frntkm/s7RInkNU6t9JpWNE5WBAFPo3CcHeg+9D703OziUOhCg6MQ/yakrspuZsyEjdYfsm+Jg2K1jZEfZLKQWUvFglylBobZXDLwSP8//EGpD4NNj7dUJpT6" + "hQY3W33h/AhCt84zDBf5l/MDl08", ) require.NoError(t, err) + gzippedBytes, err := base64.StdEncoding.DecodeString( "H4sIAAFXNGAAA0WOUWuDMBSF/0q4z7Zo0HbN2xDbF7fCFAZbyoh6p2FqJIkrQ/zvi7Vjj4dz+M43QYfGiBrznwGBQXx+zl/O6cdTkmWPpwQ8UNce9dK0aqyuwpZNqmrjilbVJ63GwXVr" + "yqxG0a3RjIUptRysVP1Rtha1AfZ+ue2Sb+ztEieQ1Tq30mlY0TlYEAU+jcJwd6D70PvTc7OJQ6EKDoxD/JqSuym5mzISN1h+yb4mDYrWNkR9kspBZS8WCXKUGhtlcMvBI/z/8QakPg02" + 
"Pt1QmlPqFBjdbfeH8CEK3zjMMF/mX0TaxZUpAQAA", ) require.NoError(t, err) - notZippedBytes := []byte(`{ - "messageType": "CONTROL_MESSAGE", - "owner": "CloudwatchLogs", - "logGroup": "", - "logStream": "", - "subscriptionFilters": [], - "logEvents": [ - { - "id": "", - "timestamp": 1510254469274, - "message": "{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}," - }, - { - "id": "", - "timestamp": 1510254469274, - "message": "{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}" - } - ] -}`) - parser := &json.Parser{ - MetricName: "json_test", - Query: "logEvents", - StringFields: []string{"message"}, - } - require.NoError(t, parser.Init()) - type fields struct { - ContentEncoding string - parser telegraf.Parser - records map[telegraf.TrackingID]string - } - type args struct { - r *consumer.Record - } - type expected struct { - numberOfMetrics int - messageContains string + notZippedBytes := []byte(` + { + "messageType": "CONTROL_MESSAGE", + "owner": "CloudwatchLogs", + "logGroup": "", + "logStream": "", + "subscriptionFilters": [], + "logEvents": [ + { + "id": "", + "timestamp": 1510254469274, + "message": "{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}," + }, + { + "id": "", + "timestamp": 1510254469274, + "message": "{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}" + } + ] } + `) + tests := []struct { - name string - fields fields - args args - wantErr bool - expected expected + name string + encoding string + records map[telegraf.TrackingID]string + args *consumer.Record + expectedNumber int + expectedContent string }{ { - name: "test no compression", - fields: fields{ - ContentEncoding: "none", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: 
args{ - r: &consumer.Record{ - Record: types.Record{ - Data: notZippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test no compression", + encoding: "none", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 2, - }, + expectedNumber: 2, + expectedContent: "bob", }, { - name: "test no compression via empty string for ContentEncoding", - fields: fields{ - ContentEncoding: "", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: notZippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test no compression via empty string for ContentEncoding", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 2, - }, + expectedNumber: 2, + expectedContent: "bob", }, { - name: "test no compression via identity ContentEncoding", - fields: fields{ - ContentEncoding: "identity", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: notZippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test no compression via identity ContentEncoding", + encoding: "identity", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 2, - }, + expectedNumber: 2, + expectedContent: "bob", }, { - name: "test no compression via no ContentEncoding", - fields: fields{ - parser: parser, - 
records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: notZippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test no compression via no ContentEncoding", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 2, - }, + expectedNumber: 2, + expectedContent: "bob", }, { - name: "test gzip compression", - fields: fields{ - ContentEncoding: "gzip", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: gzippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test gzip compression", + encoding: "gzip", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: gzippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 1, - }, + expectedNumber: 1, + expectedContent: "bob", }, { - name: "test zlib compression", - fields: fields{ - ContentEncoding: "zlib", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: zlibBytpes, - SequenceNumber: aws.String("anything"), - }, + name: "test zlib compression", + encoding: "zlib", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: zlibBytpes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 1, - }, + expectedNumber: 1, + expectedContent: "bob", }, } - k := &KinesisConsumer{ - ContentEncoding: "notsupported", - } - err = k.Init() - require.Error(t, err) - for _, tt := range tests { t.Run(tt.name, func(t 
*testing.T) { - k := &KinesisConsumer{ - ContentEncoding: tt.fields.ContentEncoding, - parser: tt.fields.parser, - records: tt.fields.records, + // Prepare JSON parser + parser := &json.Parser{ + MetricName: "json_test", + Query: "logEvents", + StringFields: []string{"message"}, } - err := k.Init() - require.NoError(t, err) + require.NoError(t, parser.Init()) - acc := testutil.Accumulator{} - if err := k.onMessage(acc.WithTracking(tt.expected.numberOfMetrics), tt.args.r); (err != nil) != tt.wantErr { - t.Errorf("onMessage() error = %v, wantErr %v", err, tt.wantErr) + // Setup plugin + plugin := &KinesisConsumer{ + ContentEncoding: tt.encoding, + parser: parser, + records: tt.records, } + require.NoError(t, plugin.Init()) + + var acc testutil.Accumulator + require.NoError(t, plugin.onMessage(acc.WithTracking(tt.expectedNumber), tt.args)) - require.Len(t, acc.Metrics, tt.expected.numberOfMetrics) + actual := acc.GetTelegrafMetrics() + require.Len(t, actual, tt.expectedNumber) - for _, metric := range acc.Metrics { - if logEventMessage, ok := metric.Fields["message"]; ok { - require.Contains(t, logEventMessage.(string), tt.expected.messageContains) - } else { - t.Errorf("Expect logEvents to be present") - } + for _, metric := range actual { + raw, found := metric.GetField("message") + require.True(t, found, "no message present") + message, ok := raw.(string) + require.Truef(t, ok, "message not a string but %T", raw) + require.Contains(t, message, tt.expectedContent) } }) } diff --git a/plugins/inputs/kinesis_consumer/logging.go b/plugins/inputs/kinesis_consumer/logging.go new file mode 100644 index 0000000000000..82e9458654ea4 --- /dev/null +++ b/plugins/inputs/kinesis_consumer/logging.go @@ -0,0 +1,27 @@ +package kinesis_consumer + +import ( + "github.com/aws/smithy-go/logging" + + "github.com/influxdata/telegraf" +) + +type telegrafLoggerWrapper struct { + telegraf.Logger +} + +func (t *telegrafLoggerWrapper) Log(args ...interface{}) { + t.Trace(args...) 
+} + +func (t *telegrafLoggerWrapper) Logf(classification logging.Classification, format string, v ...interface{}) { + switch classification { + case logging.Debug: + format = "DEBUG " + format + case logging.Warn: + format = "WARN" + format + default: + format = "INFO " + format + } + t.Logger.Tracef(format, v...) +} diff --git a/plugins/inputs/kinesis_consumer/noop_store.go b/plugins/inputs/kinesis_consumer/noop_store.go new file mode 100644 index 0000000000000..f400fdc718b9f --- /dev/null +++ b/plugins/inputs/kinesis_consumer/noop_store.go @@ -0,0 +1,7 @@ +package kinesis_consumer + +// noopStore implements the storage interface with discard +type noopStore struct{} + +func (noopStore) SetCheckpoint(_, _, _ string) error { return nil } +func (noopStore) GetCheckpoint(_, _ string) (string, error) { return "", nil } From 795b8a9f963c6cb40f48cb5b66c99ea9c81a9759 Mon Sep 17 00:00:00 2001 From: Landon Clipp <11232769+LandonTClipp@users.noreply.github.com> Date: Wed, 11 Dec 2024 14:31:28 -0600 Subject: [PATCH 073/170] docs(specs): Add `probe` as value to `startup_error_behavior` (#16052) --- docs/specs/tsd-006-startup-error-behavior.md | 13 ++++ docs/specs/tsd-009-probe-on-startup.md | 68 ++++++++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 docs/specs/tsd-009-probe-on-startup.md diff --git a/docs/specs/tsd-006-startup-error-behavior.md b/docs/specs/tsd-006-startup-error-behavior.md index 33fd39d8b16c1..4ae8549546828 100644 --- a/docs/specs/tsd-006-startup-error-behavior.md +++ b/docs/specs/tsd-006-startup-error-behavior.md @@ -75,6 +75,19 @@ must *not* fail on startup errors and should continue running. On startup error, Telegraf must ignore the plugin as-if it was not configured at all, i.e. the plugin must be completely removed from processing. +### `probe` behavior + +When using the `probe` setting for the `startup_error_behavior` option Telegraf +must *not* fail on startup errors and should continue running. 
On startup error, +Telegraf must ignore the plugin as-if it was not configured at all, i.e. the +plugin must be completely removed from processing, similar to the `ignore` +behavior. Additionally, Telegraf must probe the plugin (as defined in +[TSD-009][tsd_009]) after startup, if it implements the `ProbePlugin` interface. +If probing is available *and* returns an error Telegraf must *ignore* the +plugin as-if it was not configured at all. + +[tsd_009]: /docs/specs/tsd-009-probe-on-startup.md + ## Plugin Requirements Plugins participating in handling startup errors must implement the `Start()` diff --git a/docs/specs/tsd-009-probe-on-startup.md b/docs/specs/tsd-009-probe-on-startup.md new file mode 100644 index 0000000000000..99eec04178b43 --- /dev/null +++ b/docs/specs/tsd-009-probe-on-startup.md @@ -0,0 +1,68 @@ +# Probing plugins after startup + +## Objective + +Allow Telegraf to probe plugins during startup to enable enhanced plugin error +detection like availability of hardware or services + +## Keywords + +inputs, outputs, startup, probe, error, ignore, behavior + +## Overview + +When plugins are first instantiated, Telegraf will call the plugin's `Start()` +method (for inputs) or `Connect()` (for outputs) which will initialize its +configuration based off of config options and the running environment. It is +sometimes the case that while the initialization step succeeds, the upstream +service in which the plugin relies on is not actually running, or is not capable +of being communicated with due to incorrect configuration or environmental +problems. In situations like this, Telegraf does not detect that the plugin's +upstream service is not functioning properly, and thus it will continually call +the plugin during each `Gather()` iteration. This often has the effect of +polluting journald and system logs with voluminous error messages, which creates +issues for system administrators who rely on such logs to identify other +unrelated system problems. 
+ +More background discussion on this option, including other possible avenues, can +be viewed [here](https://github.com/influxdata/telegraf/issues/16028). + +## Probing + +Probing is an action whereby the plugin should ensure that the plugin will be +fully functional on a best effort basis. This may comprise communicating with +its external service, trying to access required devices, entities or executables +etc to ensure that the plugin will not produce errors during e.g. data collection +or data output. Probing must *not* produce, process or output any metrics. + +Plugins that support probing must implement the `ProbePlugin` interface. Such +plugins must behave in the following manner: + +1. Return an error if the external dependencies (hardware, services, +executables, etc.) of the plugin are not available. +2. Return an error if information cannot be gathered (in the case of inputs) or +sent (in the case of outputs) due to unrecoverable issues. For example, invalid +authentication, missing permissions, or non-existent endpoints. +3. Otherwise, return `nil` indicating the plugin will be fully functional. + +## Plugin Requirements + +Plugins that allow probing must implement the `ProbePlugin` interface. The +exact implementation depends on the plugin's functionality and requirements, +but generally it should take the same actions as it would during normal operation +e.g. calling `Gather()` or `Write()` and check if errors occur. If probing fails, +it must be safe to call the plugin's `Close()` method. + +Input plugins must *not* produce metrics, output plugins must *not* send any +metrics to the service. Plugins must *not* influence the later data processing or +collection by modifying the internal state of the plugin or the external state of the +service or hardware. For example, file-offsets or other service states must be +reset to not lose data during the first gather or write cycle. + +Plugins must return `nil` upon successful probing or an error otherwise. 
+ +## Related Issues + +- [#16028](https://github.com/influxdata/telegraf/issues/16028) +- [#15916](https://github.com/influxdata/telegraf/pull/15916) +- [#16001](https://github.com/influxdata/telegraf/pull/16001) From 2ccc79ce2781e22efed29ce33a2ebe3e82c23c8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 11 Dec 2024 21:32:16 +0100 Subject: [PATCH 074/170] test(linters): Enable `testifylint`: `contains`, `encoded-compare` and `regexp` (#16262) --- .golangci.yml | 3 ++ plugins/serializers/json/json_test.go | 10 +++---- .../serializers/nowmetric/nowmetric_test.go | 30 ++++++++----------- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a7eab4390f758..6c70d0cbc176a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -335,7 +335,9 @@ linters-settings: - blank-import - bool-compare - compares + - contains - empty + - encoded-compare - error-is-as - error-nil - expected-actual @@ -345,6 +347,7 @@ linters-settings: - len - negative-positive - nil-compare + - regexp - require-error - suite-broken-parallel - suite-dont-use-pkg diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index d562ca79f59d4..8c1b94aa655d7 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -196,10 +196,10 @@ func TestSerializeBatch(t *testing.T) { require.NoError(t, s.Init()) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - require.Equal( + require.JSONEq( t, - []byte(`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`+"\n"), - buf, + `{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`, + string(buf), ) } @@ -220,7 +220,7 @@ func TestSerializeBatchSkipInf(t *testing.T) { require.NoError(t, s.Init()) buf, err := s.SerializeBatch(metrics) require.NoError(t, 
err) - require.Equal(t, []byte(`{"metrics":[{"fields":{"time_idle":42},"name":"cpu","tags":{},"timestamp":0}]}`+"\n"), buf) + require.JSONEq(t, `{"metrics":[{"fields":{"time_idle":42},"name":"cpu","tags":{},"timestamp":0}]}`, string(buf)) } func TestSerializeBatchSkipInfAllFields(t *testing.T) { @@ -239,7 +239,7 @@ func TestSerializeBatchSkipInfAllFields(t *testing.T) { require.NoError(t, s.Init()) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - require.Equal(t, []byte(`{"metrics":[{"fields":{},"name":"cpu","tags":{},"timestamp":0}]}`+"\n"), buf) + require.JSONEq(t, `{"metrics":[{"fields":{},"name":"cpu","tags":{},"timestamp":0}]}`, string(buf)) } func TestSerializeTransformationNonBatch(t *testing.T) { diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go index 167963d461466..e0b2de3cce3a1 100644 --- a/plugins/serializers/nowmetric/nowmetric_test.go +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -191,13 +191,11 @@ func TestSerializeBatch(t *testing.T) { s := &Serializer{} buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - require.Equal( + require.JSONEq( t, - []byte( - `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},`+ - `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]`, - ), - buf, + `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},`+ + `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]`, + string(buf), ) } @@ -213,10 +211,10 @@ func TestSerializeJSONv2Format(t *testing.T) { s := &Serializer{Format: "jsonv2"} buf, err := s.Serialize(m) require.NoError(t, err) - require.Equal( + require.JSONEq( t, - 
[]byte(`{"records":[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]}`), - buf, + `{"records":[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]}`, + string(buf), ) } @@ -233,15 +231,13 @@ func TestSerializeJSONv2FormatBatch(t *testing.T) { metrics := []telegraf.Metric{m, m} buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - require.Equal( + require.JSONEq( t, - []byte( - `{"records":[`+ - `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},`+ - `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}`+ - `]}`, - ), - buf, + `{"records":[`+ + `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},`+ + `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}`+ + `]}`, + string(buf), ) } From e3ce01abf029c23fd16152d7290c92b1aa301ac8 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 11 Dec 2024 22:32:52 +0100 Subject: [PATCH 075/170] feat(inputs.systemd_units): Add active_enter_timestamp_us field (#16287) --- go.mod | 2 +- plugins/inputs/systemd_units/README.md | 1 + .../systemd_units/systemd_units_linux.go | 44 +++-- .../systemd_units/systemd_units_test.go | 158 +++++++++--------- 4 files changed, 113 insertions(+), 92 deletions(-) diff --git a/go.mod b/go.mod index 9f8ae22fefcc5..fa91210beccf1 100644 --- a/go.mod +++ b/go.mod @@ -89,7 +89,6 @@ require ( github.com/go-sql-driver/mysql v1.8.1 github.com/go-stomp/stomp v2.1.4+incompatible github.com/gobwas/glob v0.2.3 - github.com/godbus/dbus/v5 v5.1.0 github.com/gofrs/uuid/v5 v5.3.0 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec @@ -348,6 +347,7 @@ require 
( github.com/goburrow/serial v0.1.1-0.20211022031912-bfb69110f8dd // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.1 // indirect diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md index 5364f43ef574c..679d5819e9691 100644 --- a/plugins/inputs/systemd_units/README.md +++ b/plugins/inputs/systemd_units/README.md @@ -94,6 +94,7 @@ The following *additional* metrics are available with `details = true`: - swap_current (uint, current swap usage) - swap_peak (uint, peak swap usage) - mem_avail (uint, available memory for this unit) + - active_enter_timestamp_us (uint, timestamp in us when entered the state) ### Load diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index 443095ee203d8..2500b6cce62be 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -123,17 +123,18 @@ type client interface { ListUnitFilesByPatternsContext(ctx context.Context, states, pattern []string) ([]dbus.UnitFile, error) ListUnitsByNamesContext(ctx context.Context, units []string) ([]dbus.UnitStatus, error) GetUnitTypePropertiesContext(ctx context.Context, unit, unitType string) (map[string]interface{}, error) - GetUnitPropertyContext(ctx context.Context, unit, propertyName string) (*dbus.Property, error) + GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) ListUnitsContext(ctx context.Context) ([]dbus.UnitStatus, error) } type archParams struct { - client client - pattern []string - filter filter.Filter - unitTypeDBus string - scope string - user string + client client + pattern []string + filter filter.Filter + unitTypeDBus 
string + scope string + user string + warnUnitProps map[string]bool } func (s *SystemdUnits) Init() error { @@ -176,6 +177,8 @@ func (s *SystemdUnits) Init() error { return fmt.Errorf("invalid 'scope' %q", s.Scope) } + s.warnUnitProps = make(map[string]bool) + return nil } @@ -374,26 +377,35 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { } // Get required unit file properties - var unitFileState string - if v, err := s.client.GetUnitPropertyContext(ctx, state.Name, "UnitFileState"); err == nil { - unitFileState = strings.Trim(v.Value.String(), `'"`) + unitProperties, err := s.client.GetUnitPropertiesContext(ctx, state.Name) + if err != nil && !s.warnUnitProps[state.Name] { + s.Log.Warnf("Cannot read unit properties for %q: %v", state.Name, err) + s.warnUnitProps[state.Name] = true + } + + // Set tags + if v, found := unitProperties["UnitFileState"]; found { + tags["state"] = v.(string) } - var unitFilePreset string - if v, err := s.client.GetUnitPropertyContext(ctx, state.Name, "UnitFilePreset"); err == nil { - unitFilePreset = strings.Trim(v.Value.String(), `'"`) + if v, found := unitProperties["UnitFilePreset"]; found { + tags["preset"] = v.(string) } - tags["state"] = unitFileState - tags["preset"] = unitFilePreset + // Set fields + if v, found := unitProperties["ActiveEnterTimestamp"]; found { + fields["active_enter_timestamp_us"] = v + } fields["status_errno"] = properties["StatusErrno"] fields["restarts"] = properties["NRestarts"] fields["pid"] = properties["MainPID"] + fields["mem_current"] = properties["MemoryCurrent"] fields["mem_peak"] = properties["MemoryPeak"] + fields["mem_avail"] = properties["MemoryAvailable"] + fields["swap_current"] = properties["MemorySwapCurrent"] fields["swap_peak"] = properties["MemorySwapPeak"] - fields["mem_avail"] = properties["MemoryAvailable"] // Sanitize unset memory fields for k, value := range fields { diff --git a/plugins/inputs/systemd_units/systemd_units_test.go 
b/plugins/inputs/systemd_units/systemd_units_test.go index 7add99775d661..3c2c711110ac0 100644 --- a/plugins/inputs/systemd_units/systemd_units_test.go +++ b/plugins/inputs/systemd_units/systemd_units_test.go @@ -4,7 +4,6 @@ package systemd_units import ( "context" - "errors" "fmt" "math" "os/user" @@ -13,7 +12,6 @@ import ( "time" sdbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -25,12 +23,13 @@ import ( ) type properties struct { - uf *sdbus.UnitFile - utype string - state *sdbus.UnitStatus - ufPreset string - ufState string - properties map[string]interface{} + uf *sdbus.UnitFile + utype string + state *sdbus.UnitStatus + ufPreset string + ufState string + ufActiveEnter uint64 + properties map[string]interface{} } func TestDefaultPattern(t *testing.T) { @@ -284,6 +283,7 @@ func TestListFiles(t *testing.T) { } func TestShow(t *testing.T) { + enter := time.Now().UnixMicro() tests := []struct { name string properties map[string]properties @@ -301,8 +301,9 @@ func TestShow(t *testing.T) { ActiveState: "active", SubState: "running", }, - ufPreset: "disabled", - ufState: "enabled", + ufPreset: "disabled", + ufState: "enabled", + ufActiveEnter: uint64(enter), properties: map[string]interface{}{ "Id": "example.service", "StatusErrno": 0, @@ -328,17 +329,18 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 0, - "active_code": 0, - "sub_code": 0, - "status_errno": 0, - "restarts": 1, - "mem_current": uint64(1000), - "mem_peak": uint64(2000), - "swap_current": uint64(3000), - "swap_peak": uint64(4000), - "mem_avail": uint64(5000), - "pid": 9999, + "load_code": 0, + "active_code": 0, + "sub_code": 0, + "status_errno": 0, + "restarts": 1, + "mem_current": uint64(1000), + "mem_peak": uint64(2000), + "swap_current": uint64(3000), + "swap_peak": uint64(4000), + "mem_avail": uint64(5000), + "pid": 9999, + "active_enter_timestamp_us": 
uint64(enter), }, time.Unix(0, 0), ), @@ -355,8 +357,9 @@ func TestShow(t *testing.T) { ActiveState: "active", SubState: "exited", }, - ufPreset: "disabled", - ufState: "enabled", + ufPreset: "disabled", + ufState: "enabled", + ufActiveEnter: 0, properties: map[string]interface{}{ "Id": "example.service", "StatusErrno": 0, @@ -376,16 +379,17 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 0, - "active_code": 0, - "sub_code": 4, - "status_errno": 0, - "restarts": 0, - "mem_current": uint64(0), - "mem_peak": uint64(0), - "swap_current": uint64(0), - "swap_peak": uint64(0), - "mem_avail": uint64(0), + "load_code": 0, + "active_code": 0, + "sub_code": 4, + "status_errno": 0, + "restarts": 0, + "mem_current": uint64(0), + "mem_peak": uint64(0), + "swap_current": uint64(0), + "swap_peak": uint64(0), + "mem_avail": uint64(0), + "active_enter_timestamp_us": uint64(0), }, time.Unix(0, 0), ), @@ -402,8 +406,9 @@ func TestShow(t *testing.T) { ActiveState: "failed", SubState: "failed", }, - ufPreset: "disabled", - ufState: "enabled", + ufPreset: "disabled", + ufState: "enabled", + ufActiveEnter: uint64(enter), properties: map[string]interface{}{ "Id": "example.service", "StatusErrno": 10, @@ -428,16 +433,17 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 0, - "active_code": 3, - "sub_code": 12, - "status_errno": 10, - "restarts": 1, - "mem_current": uint64(1000), - "mem_peak": uint64(2000), - "swap_current": uint64(3000), - "swap_peak": uint64(4000), - "mem_avail": uint64(5000), + "load_code": 0, + "active_code": 3, + "sub_code": 12, + "status_errno": 10, + "restarts": 1, + "mem_current": uint64(1000), + "mem_peak": uint64(2000), + "swap_current": uint64(3000), + "swap_peak": uint64(4000), + "mem_avail": uint64(5000), + "active_enter_timestamp_us": uint64(enter), }, time.Unix(0, 0), ), @@ -454,8 +460,9 @@ func TestShow(t *testing.T) { ActiveState: "inactive", SubState: "dead", }, 
- ufPreset: "disabled", - ufState: "enabled", + ufPreset: "disabled", + ufState: "enabled", + ufActiveEnter: uint64(0), properties: map[string]interface{}{ "Id": "example.service", }, @@ -473,14 +480,15 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 2, - "active_code": 2, - "sub_code": 1, - "mem_current": uint64(0), - "mem_peak": uint64(0), - "swap_current": uint64(0), - "swap_peak": uint64(0), - "mem_avail": uint64(0), + "load_code": 2, + "active_code": 2, + "sub_code": 1, + "mem_current": uint64(0), + "mem_peak": uint64(0), + "swap_current": uint64(0), + "swap_peak": uint64(0), + "mem_avail": uint64(0), + "active_enter_timestamp_us": uint64(0), }, time.Unix(0, 0), ), @@ -517,8 +525,9 @@ func TestShow(t *testing.T) { ActiveState: "inactive", SubState: "dead", }, - ufPreset: "disabled", - ufState: "disabled", + ufPreset: "disabled", + ufState: "disabled", + ufActiveEnter: uint64(0), properties: map[string]interface{}{ "Id": "example.service", "StatusErrno": 0, @@ -543,16 +552,17 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 0, - "active_code": int64(2), - "sub_code": 1, - "status_errno": 0, - "restarts": 0, - "mem_current": uint64(0), - "mem_peak": uint64(0), - "swap_current": uint64(0), - "swap_peak": uint64(0), - "mem_avail": uint64(0), + "load_code": 0, + "active_code": int64(2), + "sub_code": 1, + "status_errno": 0, + "restarts": 0, + "mem_current": uint64(0), + "mem_peak": uint64(0), + "swap_current": uint64(0), + "swap_peak": uint64(0), + "mem_avail": uint64(0), + "active_enter_timestamp_us": uint64(0), }, time.Unix(0, 0), ), @@ -974,19 +984,17 @@ func (c *fakeClient) GetUnitTypePropertiesContext(_ context.Context, unit, unitT return u.properties, nil } -func (c *fakeClient) GetUnitPropertyContext(_ context.Context, unit, propertyName string) (*sdbus.Property, error) { +func (c *fakeClient) GetUnitPropertiesContext(_ context.Context, unit string) 
(map[string]interface{}, error) { u, found := c.units[unit] if !found { return nil, nil } - switch propertyName { - case "UnitFileState": - return &sdbus.Property{Name: propertyName, Value: dbus.MakeVariant(u.ufState)}, nil - case "UnitFilePreset": - return &sdbus.Property{Name: propertyName, Value: dbus.MakeVariant(u.ufPreset)}, nil - } - return nil, errors.New("unknown property") + return map[string]interface{}{ + "UnitFileState": u.ufState, + "UnitFilePreset": u.ufPreset, + "ActiveEnterTimestamp": u.ufActiveEnter, + }, nil } func (c *fakeClient) ListUnitsContext(_ context.Context) ([]sdbus.UnitStatus, error) { From d2e032eb4a8519cc556b0d887ce8e1395b7591e1 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 11 Dec 2024 22:33:31 +0100 Subject: [PATCH 076/170] fix(agent): Skip initialization of second processor state if requested (#16290) --- agent/agent.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 7f00fc6ca9ff5..ed19d3f764c38 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -231,10 +231,12 @@ func (a *Agent) InitPlugins() error { return fmt.Errorf("could not initialize aggregator %s: %w", aggregator.LogName(), err) } } - for _, processor := range a.Config.AggProcessors { - err := processor.Init() - if err != nil { - return fmt.Errorf("could not initialize processor %s: %w", processor.LogName(), err) + if !a.Config.Agent.SkipProcessorsAfterAggregators { + for _, processor := range a.Config.AggProcessors { + err := processor.Init() + if err != nil { + return fmt.Errorf("could not initialize processor %s: %w", processor.LogName(), err) + } } } for _, output := range a.Config.Outputs { From 3e644a05c610157c735b9ff7f8f17e9b7307e640 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Thu, 12 Dec 2024 10:52:25 +0800 Subject: [PATCH 077/170] Update plugins/outputs/inlong/README.md Co-authored-by: Dane Strandboge 
<136023093+DStrand1@users.noreply.github.com> --- plugins/outputs/inlong/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index a782875d2fda6..0467f36c4e4f8 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -31,7 +31,6 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - ## Suggest using CSV format here, as Inlong is also processed in CSV format data_format = "csv" ## The delimiter used when serializing data in CSV format needs to be consistent with the delimiter From 1d77b7cadde302a975c377d9ece87e2013c77d10 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 12 Dec 2024 11:03:35 +0800 Subject: [PATCH 078/170] feat(outputs): Modify code based on comments --- plugins/outputs/inlong/inlong.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index eb315eee77cb1..3817bb9e2d04e 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -15,7 +15,7 @@ import ( //go:embed sample.conf var sampleConfig string -const ManagerURLSuffix = "/inlong/manager/openapi/dataproxy/getIpList" +const managerURLSuffix = "/inlong/manager/openapi/dataproxy/getIpList" type Inlong struct { GroupID string `toml:"group_id"` @@ -37,7 +37,7 @@ func (i *Inlong) SetSerializer(serializer telegraf.Serializer) { } func (i *Inlong) Connect() error { - producer, err := i.producerFunc(i.GroupID, i.ManagerURL+ManagerURLSuffix) + producer, err := i.producerFunc(i.GroupID, i.ManagerURL+managerURLSuffix) if err != nil { return &internal.StartupError{Err: err, Retry: true} } @@ -71,12 +71,12 @@ func (i *Inlong) Write(metrics []telegraf.Metric) error { func init() { 
outputs.Add("inlong", func() telegraf.Output { return &Inlong{ - producerFunc: NewProducer, + producerFunc: newProducer, } }) } -func NewProducer(groupID, managerURL string) (dataproxy.Client, error) { +func newProducer(groupID, managerURL string) (dataproxy.Client, error) { producer, err := dataproxy.NewClient( dataproxy.WithGroupID(groupID), dataproxy.WithURL(managerURL), From 36553aec1e7f5ae14feb9d24d3d6a666a7714156 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 10:45:53 -0600 Subject: [PATCH 079/170] chore(deps): Bump golang.org/x/crypto from 0.29.0 to 0.31.0 (#16297) --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index fa91210beccf1..bd7f76df4317e 100644 --- a/go.mod +++ b/go.mod @@ -211,14 +211,14 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 go.starlark.net v0.0.0-20240925182052-1207426daebd go.step.sm/crypto v0.54.0 - golang.org/x/crypto v0.29.0 + golang.org/x/crypto v0.31.0 golang.org/x/mod v0.21.0 golang.org/x/net v0.31.0 golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.9.0 - golang.org/x/sys v0.27.0 - golang.org/x/term v0.26.0 - golang.org/x/text v0.20.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/term v0.27.0 + golang.org/x/text v0.21.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 gonum.org/v1/gonum v0.15.1 google.golang.org/api v0.203.0 diff --git a/go.sum b/go.sum index 58b901287ccef..628bb6e28e1ee 100644 --- a/go.sum +++ b/go.sum @@ -2550,8 +2550,8 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.29.0 
h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2753,8 +2753,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2891,8 +2891,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
-golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2910,8 +2910,8 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2931,8 +2931,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.20.0 
h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From e15a3c8dc6ba0ae3de621429b24b8687b529ffeb Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 13 Dec 2024 17:46:37 +0100 Subject: [PATCH 080/170] chore(agent): Add warning about changing default for 'skip_processors_after_aggregators' (#16302) --- CHANGELOG.md | 10 ++++++++++ agent/agent.go | 36 ++++++++++++++++++++++++++++++++---- config/config.go | 2 +- 3 files changed, 43 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a022cbf4d0dac..0d73b17fcc318 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,16 @@ # Changelog +## Unreleased + +### Important Changes + +- The default value of `skip_processors_after_aggregators` will change to `true` + with Telegraf `v1.40.0`, skip running the processors again after aggregators! + If you need the current default behavior, please explicitly set the option to + `false`! To silence the warning and use the future default behavior, please + explicitly set the option to `true`. 
+ ## v1.33.0 [2024-12-09] ### New Plugins diff --git a/agent/agent.go b/agent/agent.go index ed19d3f764c38..d4c284ac905a6 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "github.com/fatih/color" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -106,6 +107,15 @@ func (a *Agent) Run(ctx context.Context) error { time.Duration(a.Config.Agent.Interval), a.Config.Agent.Quiet, a.Config.Agent.Hostname, time.Duration(a.Config.Agent.FlushInterval)) + // Set the default for processor skipping + if a.Config.Agent.SkipProcessorsAfterAggregators == nil { + msg := `The default value of 'skip_processors_after_aggregators' will change to 'true' with Telegraf v1.40.0! ` + msg += `If you need the current default behavior, please explicitly set the option to 'false'!` + log.Print("W! [agent] ", color.YellowString(msg)) + skipProcessorsAfterAggregators := false + a.Config.Agent.SkipProcessorsAfterAggregators = &skipProcessorsAfterAggregators + } + log.Printf("D! 
[agent] Initializing plugins") if err := a.InitPlugins(); err != nil { return err @@ -136,7 +146,7 @@ func (a *Agent) Run(ctx context.Context) error { var au *aggregatorUnit if len(a.Config.Aggregators) != 0 { aggC := next - if len(a.Config.AggProcessors) != 0 && !a.Config.Agent.SkipProcessorsAfterAggregators { + if len(a.Config.AggProcessors) != 0 && !*a.Config.Agent.SkipProcessorsAfterAggregators { aggC, apu, err = a.startProcessors(next, a.Config.AggProcessors) if err != nil { return err @@ -231,7 +241,7 @@ func (a *Agent) InitPlugins() error { return fmt.Errorf("could not initialize aggregator %s: %w", aggregator.LogName(), err) } } - if !a.Config.Agent.SkipProcessorsAfterAggregators { + if !*a.Config.Agent.SkipProcessorsAfterAggregators { for _, processor := range a.Config.AggProcessors { err := processor.Init() if err != nil { @@ -1000,6 +1010,15 @@ func (a *Agent) Test(ctx context.Context, wait time.Duration) error { // outputC. After gathering pauses for the wait duration to allow service // inputs to run. func (a *Agent) runTest(ctx context.Context, wait time.Duration, outputC chan<- telegraf.Metric) error { + // Set the default for processor skipping + if a.Config.Agent.SkipProcessorsAfterAggregators == nil { + msg := `The default value of 'skip_processors_after_aggregators' will change to 'true' with Telegraf v1.40.0! ` + msg += `If you need the current default behavior, please explicitly set the option to 'false'!` + log.Print("W! [agent] ", color.YellowString(msg)) + skipProcessorsAfterAggregators := false + a.Config.Agent.SkipProcessorsAfterAggregators = &skipProcessorsAfterAggregators + } + log.Printf("D! 
[agent] Initializing plugins") if err := a.InitPlugins(); err != nil { return err @@ -1013,7 +1032,7 @@ func (a *Agent) runTest(ctx context.Context, wait time.Duration, outputC chan<- var au *aggregatorUnit if len(a.Config.Aggregators) != 0 { procC := next - if len(a.Config.AggProcessors) != 0 && !a.Config.Agent.SkipProcessorsAfterAggregators { + if len(a.Config.AggProcessors) != 0 && !*a.Config.Agent.SkipProcessorsAfterAggregators { var err error procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) if err != nil { @@ -1096,6 +1115,15 @@ func (a *Agent) Once(ctx context.Context, wait time.Duration) error { // outputC. After gathering pauses for the wait duration to allow service // inputs to run. func (a *Agent) runOnce(ctx context.Context, wait time.Duration) error { + // Set the default for processor skipping + if a.Config.Agent.SkipProcessorsAfterAggregators == nil { + msg := `The default value of 'skip_processors_after_aggregators' will change to 'true' with Telegraf v1.40.0! ` + msg += `If you need the current default behavior, please explicitly set the option to 'false'!` + log.Print("W! [agent] ", color.YellowString(msg)) + skipProcessorsAfterAggregators := false + a.Config.Agent.SkipProcessorsAfterAggregators = &skipProcessorsAfterAggregators + } + log.Printf("D! 
[agent] Initializing plugins") if err := a.InitPlugins(); err != nil { return err @@ -1113,7 +1141,7 @@ func (a *Agent) runOnce(ctx context.Context, wait time.Duration) error { var au *aggregatorUnit if len(a.Config.Aggregators) != 0 { procC := next - if len(a.Config.AggProcessors) != 0 && !a.Config.Agent.SkipProcessorsAfterAggregators { + if len(a.Config.AggProcessors) != 0 && !*a.Config.Agent.SkipProcessorsAfterAggregators { procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) if err != nil { return err diff --git a/config/config.go b/config/config.go index 6a71646b095da..4790f03e82e18 100644 --- a/config/config.go +++ b/config/config.go @@ -279,7 +279,7 @@ type AgentConfig struct { // Flag to skip running processors after aggregators // By default, processors are run a second time after aggregators. Changing // this setting to true will skip the second run of processors. - SkipProcessorsAfterAggregators bool `toml:"skip_processors_after_aggregators"` + SkipProcessorsAfterAggregators *bool `toml:"skip_processors_after_aggregators"` // Number of attempts to obtain a remote configuration via a URL during // startup. Set to -1 for unlimited attempts. 
From 2bd4559bc142ef36a1c3dfd4999e066443a836d1 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 13 Dec 2024 18:23:50 +0100 Subject: [PATCH 081/170] chore(processors.unpivot): Cleanup code and improve performance (#16299) --- plugins/processors/unpivot/unpivot.go | 45 +++---- plugins/processors/unpivot/unpivot_test.go | 147 +++++++++++++++------ 2 files changed, 124 insertions(+), 68 deletions(-) diff --git a/plugins/processors/unpivot/unpivot.go b/plugins/processors/unpivot/unpivot.go index 3f41d6bb7b9ba..53bfb25aebe27 100644 --- a/plugins/processors/unpivot/unpivot.go +++ b/plugins/processors/unpivot/unpivot.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/processors" ) @@ -18,30 +19,15 @@ type Unpivot struct { ValueKey string `toml:"value_key"` } -func copyWithoutFields(metric telegraf.Metric) telegraf.Metric { - m := metric.Copy() - - fieldKeys := make([]string, 0, len(m.FieldList())) - for _, field := range m.FieldList() { - fieldKeys = append(fieldKeys, field.Key) - } - - for _, fk := range fieldKeys { - m.RemoveField(fk) - } - - return m -} - func (*Unpivot) SampleConfig() string { return sampleConfig } func (p *Unpivot) Init() error { switch p.FieldNameAs { - case "metric": case "", "tag": p.FieldNameAs = "tag" + case "metric": default: return fmt.Errorf("unrecognized metric mode: %q", p.FieldNameAs) } @@ -63,27 +49,28 @@ func (p *Unpivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric { } results := make([]telegraf.Metric, 0, fieldCount) - for _, m := range metrics { - base := m - if wm, ok := m.(telegraf.UnwrappableMetric); ok { - base = wm.Unwrap() + for _, src := range metrics { + // Create a copy without fields and tracking information + base := metric.New(src.Name(), make(map[string]string), make(map[string]interface{}), src.Time()) + for _, t := range src.TagList() { + base.AddTag(t.Key, t.Value) } - 
base = copyWithoutFields(base) - for _, field := range m.FieldList() { - newMetric := base.Copy() - newMetric.AddField(p.ValueKey, field.Value) + // Create a new metric per field and add it to the output + for _, field := range src.FieldList() { + m := base.Copy() + m.AddField(p.ValueKey, field.Value) switch p.FieldNameAs { case "metric": - newMetric.SetName(field.Key) - case "", "tag": - newMetric.AddTag(p.TagKey, field.Key) + m.SetName(field.Key) + case "tag": + m.AddTag(p.TagKey, field.Key) } - results = append(results, newMetric) + results = append(results, m) } - m.Accept() + src.Accept() } return results } diff --git a/plugins/processors/unpivot/unpivot_test.go b/plugins/processors/unpivot/unpivot_test.go index 0152513159ad8..b44632db3ec1b 100644 --- a/plugins/processors/unpivot/unpivot_test.go +++ b/plugins/processors/unpivot/unpivot_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestUnpivot_defaults(t *testing.T) { +func TestDefaults(t *testing.T) { unpivot := &Unpivot{} require.NoError(t, unpivot.Init()) require.Equal(t, "tag", unpivot.FieldNameAs) @@ -20,25 +20,25 @@ func TestUnpivot_defaults(t *testing.T) { require.Equal(t, "value", unpivot.ValueKey) } -func TestUnpivot_invalidMetricMode(t *testing.T) { +func TestInvalidMetricMode(t *testing.T) { unpivot := &Unpivot{FieldNameAs: "unknown"} require.Error(t, unpivot.Init()) } -func TestUnpivot_originalMode(t *testing.T) { +func TestOriginalMode(t *testing.T) { now := time.Now() tests := []struct { name string - unpivot *Unpivot + tagKey string + valueKey string + metrics []telegraf.Metric expected []telegraf.Metric }{ { - name: "simple", - unpivot: &Unpivot{ - TagKey: "name", - ValueKey: "value", - }, + name: "simple", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -61,11 +61,9 @@ func TestUnpivot_originalMode(t *testing.T) { }, }, { - name: "multi fields", - unpivot: &Unpivot{ - TagKey: "name", - 
ValueKey: "value", - }, + name: "multi fields", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -100,27 +98,33 @@ func TestUnpivot_originalMode(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - actual := tt.unpivot.Apply(tt.metrics...) + plugin := &Unpivot{ + TagKey: tt.tagKey, + ValueKey: tt.valueKey, + } + require.NoError(t, plugin.Init()) + + actual := plugin.Apply(tt.metrics...) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.SortMetrics()) }) } } -func TestUnpivot_fieldMode(t *testing.T) { +func TestFieldMode(t *testing.T) { now := time.Now() tests := []struct { - name string - unpivot *Unpivot - metrics []telegraf.Metric - expected []telegraf.Metric + name string + fieldNameAs string + tagKey string + valueKey string + metrics []telegraf.Metric + expected []telegraf.Metric }{ { - name: "simple", - unpivot: &Unpivot{ - FieldNameAs: "metric", - TagKey: "name", - ValueKey: "value", - }, + name: "simple", + fieldNameAs: "metric", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -141,12 +145,10 @@ func TestUnpivot_fieldMode(t *testing.T) { }, }, { - name: "multi fields", - unpivot: &Unpivot{ - FieldNameAs: "metric", - TagKey: "name", - ValueKey: "value", - }, + name: "multi fields", + fieldNameAs: "metric", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -175,12 +177,10 @@ func TestUnpivot_fieldMode(t *testing.T) { }, }, { - name: "multi fields and tags", - unpivot: &Unpivot{ - FieldNameAs: "metric", - TagKey: "name", - ValueKey: "value", - }, + name: "multi fields and tags", + fieldNameAs: "metric", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{ @@ -217,7 +217,14 @@ func TestUnpivot_fieldMode(t *testing.T) { } for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { - actual := tt.unpivot.Apply(tt.metrics...) + plugin := &Unpivot{ + FieldNameAs: tt.fieldNameAs, + TagKey: tt.tagKey, + ValueKey: tt.valueKey, + } + require.NoError(t, plugin.Init()) + + actual := plugin.Apply(tt.metrics...) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.SortMetrics()) }) } @@ -247,6 +254,8 @@ func TestTrackedMetricNotLost(t *testing.T) { // Process expected metrics and compare with resulting metrics plugin := &Unpivot{TagKey: "name", ValueKey: "value"} + require.NoError(t, plugin.Init()) + actual := plugin.Apply(input...) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) @@ -262,3 +271,63 @@ func TestTrackedMetricNotLost(t *testing.T) { return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(input)) } + +func BenchmarkAsTag(b *testing.B) { + input := metric.New( + "test", + map[string]string{ + "source": "device A", + "location": "main building", + }, + map[string]interface{}{ + "field0": 0.1, + "field1": 1.2, + "field2": 2.3, + "field3": 3.4, + "field4": 4.5, + "field5": 5.6, + "field6": 6.7, + "field7": 7.8, + "field8": 8.9, + "field9": 9.0, + }, + time.Now(), + ) + + plugin := &Unpivot{} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + plugin.Apply(input) + } +} + +func BenchmarkAsMetric(b *testing.B) { + input := metric.New( + "test", + map[string]string{ + "source": "device A", + "location": "main building", + }, + map[string]interface{}{ + "field0": 0.1, + "field1": 1.2, + "field2": 2.3, + "field3": 3.4, + "field4": 4.5, + "field5": 5.6, + "field6": 6.7, + "field7": 7.8, + "field8": 8.9, + "field9": 9.0, + }, + time.Now(), + ) + + plugin := &Unpivot{FieldNameAs: "metric"} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + plugin.Apply(input) + } +} From 516b8cfbd16dc1eade0034fe873bb23a242bee04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= 
Date: Fri, 13 Dec 2024 18:26:34 +0100 Subject: [PATCH 082/170] chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[a-e]` (#16263) --- plugins/inputs/aerospike/aerospike.go | 31 ++++----- plugins/inputs/aerospike/aerospike_test.go | 20 ++---- plugins/inputs/aliyuncms/aliyuncms_test.go | 2 +- plugins/inputs/amd_rocm_smi/amd_rocm_smi.go | 2 +- plugins/inputs/amqp_consumer/amqp_consumer.go | 6 +- plugins/inputs/apache/apache.go | 4 +- plugins/inputs/azure_monitor/azure_monitor.go | 4 +- .../azure_monitor/azure_monitor_test.go | 10 +-- plugins/inputs/bcache/bcache.go | 4 +- plugins/inputs/bond/bond.go | 4 +- plugins/inputs/bond/bond_test.go | 4 +- plugins/inputs/burrow/burrow.go | 8 +-- .../cisco_telemetry_mdt.go | 10 +-- plugins/inputs/clickhouse/clickhouse.go | 22 +++---- plugins/inputs/cloud_pubsub/cloud_pubsub.go | 2 +- .../cloud_pubsub_push/cloud_pubsub_push.go | 2 +- .../cloud_pubsub_push_test.go | 6 +- plugins/inputs/cloudwatch/cloudwatch_test.go | 20 +++--- .../cloudwatch_metric_streams.go | 2 +- plugins/inputs/couchdb/couchdb.go | 64 +++++++++---------- plugins/inputs/csgo/csgo.go | 4 +- .../inputs/ctrlx_datalayer/ctrlx_datalayer.go | 2 +- plugins/inputs/dcos/client.go | 4 +- plugins/inputs/dcos/creds.go | 6 +- plugins/inputs/dcos/dcos.go | 24 +++---- plugins/inputs/dcos/dcos_test.go | 9 +-- plugins/inputs/docker_log/docker_log.go | 2 +- plugins/inputs/docker_log/docker_log_test.go | 2 +- plugins/inputs/dovecot/dovecot.go | 4 +- plugins/inputs/ecs/ecs.go | 4 +- plugins/inputs/elasticsearch/elasticsearch.go | 2 +- .../elasticsearch_query.go | 2 +- plugins/inputs/ethtool/ethtool_linux.go | 6 +- plugins/inputs/ethtool/ethtool_test.go | 10 +-- plugins/inputs/exec/exec.go | 4 +- plugins/inputs/exec/exec_test.go | 3 +- .../{run_notwinodws.go => run_notwindows.go} | 2 +- plugins/inputs/exec/run_windows.go | 2 +- plugins/inputs/execd/execd_test.go | 6 +- plugins/inputs/execd/shim/input.go | 6 +- plugins/inputs/execd/shim/shim_test.go | 22 
++----- 41 files changed, 168 insertions(+), 185 deletions(-) rename plugins/inputs/exec/{run_notwinodws.go => run_notwindows.go} (96%) diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 26a5677075d63..52732cff85cdc 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -121,11 +121,11 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro nodes := c.GetNodes() for _, n := range nodes { nodeHost := n.GetHost().String() - stats, err := a.getNodeInfo(n, asInfoPolicy) + stats, err := getNodeInfo(n, asInfoPolicy) if err != nil { return err } - a.parseNodeInfo(acc, stats, nodeHost, n.GetName()) + parseNodeInfo(acc, stats, nodeHost, n.GetName()) namespaces, err := a.getNamespaces(n, asInfoPolicy) if err != nil { @@ -135,12 +135,12 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro if !a.DisableQueryNamespaces { // Query Namespaces for _, namespace := range namespaces { - stats, err = a.getNamespaceInfo(namespace, n, asInfoPolicy) + stats, err = getNamespaceInfo(namespace, n, asInfoPolicy) if err != nil { continue } - a.parseNamespaceInfo(acc, stats, nodeHost, namespace, n.GetName()) + parseNamespaceInfo(acc, stats, nodeHost, namespace, n.GetName()) if a.EnableTTLHistogram { err = a.getTTLHistogram(acc, nodeHost, namespace, "", n, asInfoPolicy) @@ -162,12 +162,12 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro if err == nil { for _, namespaceSet := range namespaceSets { namespace, set := splitNamespaceSet(namespaceSet) - stats, err := a.getSetInfo(namespaceSet, n, asInfoPolicy) + stats, err := getSetInfo(namespaceSet, n, asInfoPolicy) if err != nil { continue } - a.parseSetInfo(acc, stats, nodeHost, namespaceSet, n.GetName()) + parseSetInfo(acc, stats, nodeHost, namespaceSet, n.GetName()) if a.EnableTTLHistogram { err = a.getTTLHistogram(acc, nodeHost, namespace, set, n, asInfoPolicy) @@ 
-189,7 +189,7 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro return nil } -func (a *Aerospike) getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { +func getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { stats, err := n.RequestInfo(infoPolicy, "statistics") if err != nil { return nil, err @@ -198,7 +198,7 @@ func (a *Aerospike) getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[stri return stats, nil } -func (a *Aerospike) parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, nodeName string) { +func parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, nodeName string) { nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -231,7 +231,7 @@ func (a *Aerospike) getNamespaces(n *as.Node, infoPolicy *as.InfoPolicy) ([]stri return namespaces, nil } -func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { +func getNamespaceInfo(namespace string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { stats, err := n.RequestInfo(infoPolicy, "namespace/"+namespace) if err != nil { return nil, err @@ -239,7 +239,8 @@ func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node, infoPolicy *a return stats, err } -func (a *Aerospike) parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespace, nodeName string) { + +func parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespace, nodeName string) { nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -296,7 +297,7 @@ func (a *Aerospike) getSets(n *as.Node, infoPolicy *as.InfoPolicy) ([]string, er return namespaceSets, nil } -func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { +func getSetInfo(namespaceSet string, n 
*as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { stats, err := n.RequestInfo(infoPolicy, "sets/"+namespaceSet) if err != nil { return nil, err @@ -304,7 +305,7 @@ func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.I return stats, nil } -func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespaceSet, nodeName string) { +func parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespaceSet, nodeName string) { stat := strings.Split( strings.TrimSuffix( stats["sets/"+namespaceSet], ";"), ":") @@ -327,7 +328,7 @@ func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]stri } func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort, namespace, set string, n *as.Node, infoPolicy *as.InfoPolicy) error { - stats, err := a.getHistogram(namespace, set, "ttl", n, infoPolicy) + stats, err := getHistogram(namespace, set, "ttl", n, infoPolicy) if err != nil { return err } @@ -339,7 +340,7 @@ func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort, namespac } func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostPort, namespace, set string, n *as.Node, infoPolicy *as.InfoPolicy) error { - stats, err := a.getHistogram(namespace, set, "object-size-linear", n, infoPolicy) + stats, err := getHistogram(namespace, set, "object-size-linear", n, infoPolicy) if err != nil { return err } @@ -350,7 +351,7 @@ func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostP return nil } -func (a *Aerospike) getHistogram(namespace, set, histogramType string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { +func getHistogram(namespace, set, histogramType string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { var queryArg string if len(set) > 0 { queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v;set=%v", histogramType, namespace, set) diff --git 
a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index 1f3090f7a23b9..73d7b977dab72 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -309,9 +309,6 @@ func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) { } func TestParseNodeInfo(t *testing.T) { - a := &Aerospike{} - var acc testutil.Accumulator - stats := map[string]string{ "statistics": "early_tsvc_from_proxy_error=0;cluster_principal=BB9020012AC4202;cluster_is_member=true", } @@ -327,14 +324,12 @@ func TestParseNodeInfo(t *testing.T) { "node_name": "TestNodeName", } - a.parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName") + var acc testutil.Accumulator + parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_node", expectedFields, expectedTags) } func TestParseNamespaceInfo(t *testing.T) { - a := &Aerospike{} - var acc testutil.Accumulator - stats := map[string]string{ "namespace/test": "ns_cluster_size=1;effective_replication_factor=1;objects=2;tombstones=0;master_objects=2", } @@ -353,15 +348,12 @@ func TestParseNamespaceInfo(t *testing.T) { "namespace": "test", } - a.parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName") + var acc testutil.Accumulator + parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_namespace", expectedFields, expectedTags) } func TestParseSetInfo(t *testing.T) { - a := &Aerospike{} - - var acc testutil.Accumulator - stats := map[string]string{ "sets/test/foo": "objects=1:tombstones=0:memory_data_bytes=26;", } @@ -377,7 +369,9 @@ func TestParseSetInfo(t *testing.T) { "node_name": "TestNodeName", "set": "test/foo", } - a.parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName") + + var acc testutil.Accumulator + parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName") acc.AssertContainsTaggedFields(t, 
"aerospike_set", expectedFields, expectedTags) } diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index 4042baf1b0237..83dfafad02e3e 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -26,7 +26,7 @@ const inputTitle = "inputs.aliyuncms" type mockGatherAliyunCMSClient struct{} -func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) { +func (*mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) { resp := new(cms.DescribeMetricListResponse) // switch request.Metric { diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go index b30a8b1b80871..a6fe8b7780d3e 100644 --- a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go @@ -131,7 +131,7 @@ func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error { return gatherROCmSMI(data, acc) } -func (rsmi *ROCmSMI) Stop() {} +func (*ROCmSMI) Stop() {} func (rsmi *ROCmSMI) pollROCmSMI() ([]byte, error) { // Construct and execute metrics query, there currently exist (ROCm v4.3.x) a "-a" option diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 448efad87d987..e5a32eab5b264 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -64,11 +64,11 @@ type AMQPConsumer struct { decoder internal.ContentDecoder } -func (a *externalAuth) Mechanism() string { +func (*externalAuth) Mechanism() string { return "EXTERNAL" } -func (a *externalAuth) Response() string { +func (*externalAuth) Response() string { return "\000" } @@ -175,7 +175,7 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error { 
+func (*AMQPConsumer) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index 6263404faa549..cb53f2b668ad5 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -120,7 +120,7 @@ func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { switch key { case "Scoreboard": - for field, value := range n.gatherScores(part) { + for field, value := range gatherScores(part) { fields[field] = value } default: @@ -137,7 +137,7 @@ func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { return nil } -func (n *Apache) gatherScores(data string) map[string]interface{} { +func gatherScores(data string) map[string]interface{} { var waiting, open = 0, 0 var s, r, w, k, d, c, l, g, i = 0, 0, 0, 0, 0, 0, 0, 0, 0 diff --git a/plugins/inputs/azure_monitor/azure_monitor.go b/plugins/inputs/azure_monitor/azure_monitor.go index 93b3627bf47d4..a53c8710e40d4 100644 --- a/plugins/inputs/azure_monitor/azure_monitor.go +++ b/plugins/inputs/azure_monitor/azure_monitor.go @@ -58,7 +58,7 @@ type azureClientsCreator interface { //go:embed sample.conf var sampleConfig string -func (am *AzureMonitor) SampleConfig() string { +func (*AzureMonitor) SampleConfig() string { return sampleConfig } @@ -170,7 +170,7 @@ func (am *AzureMonitor) setReceiver() error { return err } -func (acm *azureClientsManager) createAzureClients( +func (*azureClientsManager) createAzureClients( subscriptionID, clientID, clientSecret, tenantID string, clientOptions azcore.ClientOptions, ) (*receiver.AzureClients, error) { diff --git a/plugins/inputs/azure_monitor/azure_monitor_test.go b/plugins/inputs/azure_monitor/azure_monitor_test.go index e51b616baad0b..421b9282493b9 100644 --- a/plugins/inputs/azure_monitor/azure_monitor_test.go +++ b/plugins/inputs/azure_monitor/azure_monitor_test.go @@ -27,7 +27,7 @@ type mockAzureMetricDefinitionsClient struct{} type mockAzureMetricsClient 
struct{} -func (mam *mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azcore.ClientOptions) (*receiver.AzureClients, error) { +func (*mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azcore.ClientOptions) (*receiver.AzureClients, error) { return &receiver.AzureClients{ Ctx: context.Background(), ResourcesClient: &mockAzureResourcesClient{}, @@ -36,7 +36,7 @@ func (mam *mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azco }, nil } -func (marc *mockAzureResourcesClient) List(_ context.Context, _ *armresources.ClientListOptions) ([]*armresources.ClientListResponse, error) { +func (*mockAzureResourcesClient) List(_ context.Context, _ *armresources.ClientListOptions) ([]*armresources.ClientListResponse, error) { var responses []*armresources.ClientListResponse file, err := os.ReadFile("testdata/json/azure_resources_response.json") @@ -59,7 +59,7 @@ func (marc *mockAzureResourcesClient) List(_ context.Context, _ *armresources.Cl return responses, nil } -func (marc *mockAzureResourcesClient) ListByResourceGroup( +func (*mockAzureResourcesClient) ListByResourceGroup( _ context.Context, resourceGroup string, _ *armresources.ClientListByResourceGroupOptions) ([]*armresources.ClientListByResourceGroupResponse, error) { @@ -105,7 +105,7 @@ func (marc *mockAzureResourcesClient) ListByResourceGroup( return nil, errors.New("resource group was not found") } -func (mamdc *mockAzureMetricDefinitionsClient) List( +func (*mockAzureMetricDefinitionsClient) List( _ context.Context, resourceID string, _ *armmonitor.MetricDefinitionsClientListOptions) (armmonitor.MetricDefinitionsClientListResponse, error) { @@ -146,7 +146,7 @@ func (mamdc *mockAzureMetricDefinitionsClient) List( return armmonitor.MetricDefinitionsClientListResponse{}, errors.New("resource ID was not found") } -func (mamc *mockAzureMetricsClient) List( +func (*mockAzureMetricsClient) List( _ context.Context, resourceID string, _ *armmonitor.MetricsClientListOptions) 
(armmonitor.MetricsClientListResponse, error) { diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 37114a2d921a1..8a9c79fc19c22 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -53,7 +53,7 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error { continue } } - if err := b.gatherBcache(bdev, acc); err != nil { + if err := gatherBcache(bdev, acc); err != nil { return fmt.Errorf("gathering bcache failed: %w", err) } } @@ -97,7 +97,7 @@ func prettyToBytes(v string) uint64 { return uint64(result) } -func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { +func gatherBcache(bdev string, acc telegraf.Accumulator) error { tags := getTags(bdev) metrics, err := filepath.Glob(bdev + "/stats_total/*") if err != nil { diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index a5c244ad3ce3e..6fdb33ba6ab8b 100644 --- a/plugins/inputs/bond/bond.go +++ b/plugins/inputs/bond/bond.go @@ -66,7 +66,7 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error { if err != nil { acc.AddError(err) } - bond.gatherSysDetails(bondName, files, acc) + gatherSysDetails(bondName, files, acc) } } return nil @@ -164,7 +164,7 @@ func (bond *Bond) readSysFiles(bondDir string) (sysFiles, error) { return output, nil } -func (bond *Bond) gatherSysDetails(bondName string, files sysFiles, acc telegraf.Accumulator) { +func gatherSysDetails(bondName string, files sysFiles, acc telegraf.Accumulator) { var slaves []string var adPortCount int diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index 18d5c71ace644..17d91c640d498 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -145,7 +145,7 @@ func TestGatherBondInterface(t *testing.T) { acc = testutil.Accumulator{} require.NoError(t, bond.gatherBondInterface("bondLACP", sampleTestLACP, &acc)) - bond.gatherSysDetails("bondLACP", sysFiles{ModeFile: sampleSysMode, SlaveFile: 
sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) + gatherSysDetails("bondLACP", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bondLACP"}) acc.AssertContainsTaggedFields( t, @@ -169,7 +169,7 @@ func TestGatherBondInterface(t *testing.T) { acc = testutil.Accumulator{} require.NoError(t, bond.gatherBondInterface("bondLACPUpDown", sampleTestLACPFirstUpSecondDown, &acc)) - bond.gatherSysDetails("bondLACPUpDown", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) + gatherSysDetails("bondLACPUpDown", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bondLACPUpDown"}) acc.AssertContainsTaggedFields( t, diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go index 0cdf8a00bf8b9..05c2f84ed9f3b 100644 --- a/plugins/inputs/burrow/burrow.go +++ b/plugins/inputs/burrow/burrow.go @@ -289,14 +289,14 @@ func (b *Burrow) gatherTopics(guard chan struct{}, src *url.URL, cluster string, return } - b.genTopicMetrics(tr, cluster, topic, acc) + genTopicMetrics(tr, cluster, topic, acc) }(topic) } wg.Wait() } -func (b *Burrow) genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) { +func genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) { for i, offset := range r.Offsets { tags := map[string]string{ "cluster": cluster, @@ -346,7 +346,7 @@ func (b *Burrow) gatherGroups(guard chan struct{}, src *url.URL, cluster string, return } - b.genGroupStatusMetrics(gr, cluster, group, acc) + genGroupStatusMetrics(gr, cluster, group, acc) b.genGroupLagMetrics(gr, cluster, group, acc) }(group) } @@ -354,7 +354,7 @@ func (b *Burrow) gatherGroups(guard chan 
struct{}, src *url.URL, cluster string, wg.Wait() } -func (b *Burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) { +func genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) { partitionCount := r.Status.PartitionCount if partitionCount == 0 { partitionCount = len(r.Status.Partitions) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index b364c6e914f64..481e60a96a9c6 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -218,7 +218,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { return nil } -func (c *CiscoTelemetryMDT) Gather(_ telegraf.Accumulator) error { +func (*CiscoTelemetryMDT) Gather(telegraf.Accumulator) error { return nil } @@ -541,7 +541,7 @@ func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemet } } -func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, +func parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, encodingPath string, tags map[string]string, timestamp time.Time) { // RIB measurement := encodingPath @@ -574,7 +574,7 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem } } -func (c *CiscoTelemetryMDT) parseMicroburst(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, +func parseMicroburst(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, encodingPath string, tags map[string]string, timestamp time.Time) { var nxMicro *telemetry.TelemetryField var nxMicro1 *telemetry.TelemetryField @@ -623,12 +623,12 @@ func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGroup isDme := strings.Contains(encodingPath, "sys/") if encodingPath == "rib" { // handle native data path rib - c.parseRib(grouper, field, encodingPath, 
tags, timestamp) + parseRib(grouper, field, encodingPath, tags, timestamp) return } if encodingPath == "microburst" { // dump microburst - c.parseMicroburst(grouper, field, encodingPath, tags, timestamp) + parseMicroburst(grouper, field, encodingPath, tags, timestamp) return } if field == nil || !isDme || len(field.Fields) == 0 || len(field.Fields[0].Fields) == 0 || len(field.Fields[0].Fields[0].Fields) == 0 { diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index 73a8e39c623bb..11ec30251168c 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -210,7 +210,7 @@ func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, met Value float64 `json:"value"` } - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) fields := make(map[string]interface{}) if commonMetricsIsFloat[metric] { @@ -241,7 +241,7 @@ func (ch *ClickHouse) zookeeper(acc telegraf.Accumulator, conn *connect) error { if err := ch.execQuery(conn.url, systemZookeeperExistsSQL, &zkExists); err != nil { return err } - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) if len(zkExists) > 0 && zkExists[0].ZkExists > 0 { var zkRootNodes []struct { @@ -270,7 +270,7 @@ func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect) return err } - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) if len(replicationQueueExists) > 0 && replicationQueueExists[0].ReplicationQueueExists > 0 { var replicationTooManyTries []struct { @@ -301,7 +301,7 @@ func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) err } if len(detachedParts) > 0 { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) acc.AddFields("clickhouse_detached_parts", map[string]interface{}{ "detached_parts": uint64(detachedParts[0].DetachedParts), @@ -323,7 +323,7 @@ func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) erro } for 
_, dict := range brokenDictionaries { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) isLoaded := uint64(1) if dict.Status != "LOADED" { @@ -356,7 +356,7 @@ func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error { } if len(mutationsStatus) > 0 { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) acc.AddFields("clickhouse_mutations", map[string]interface{}{ @@ -384,7 +384,7 @@ func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error { } for _, disk := range disksStatus { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) tags["name"] = disk.Name tags["path"] = disk.Path @@ -413,7 +413,7 @@ func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error { } for _, process := range processesStats { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) tags["query_type"] = process.QueryType acc.AddFields("clickhouse_processes", @@ -448,7 +448,7 @@ func (ch *ClickHouse) textLog(acc telegraf.Accumulator, conn *connect) error { } for _, textLogItem := range textLogLast10MinMessages { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) tags["level"] = textLogItem.Level acc.AddFields("clickhouse_text_log", map[string]interface{}{ @@ -473,7 +473,7 @@ func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error { if err := ch.execQuery(conn.url, systemPartsSQL, &parts); err != nil { return err } - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) for _, part := range parts { tags["table"] = part.Table @@ -490,7 +490,7 @@ func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error { return nil } -func (ch *ClickHouse) makeDefaultTags(conn *connect) map[string]string { +func makeDefaultTags(conn *connect) map[string]string { tags := map[string]string{ "source": conn.Hostname, } diff --git a/plugins/inputs/cloud_pubsub/cloud_pubsub.go b/plugins/inputs/cloud_pubsub/cloud_pubsub.go index 
0f686b3e7f0df..d91c55f6683bf 100644 --- a/plugins/inputs/cloud_pubsub/cloud_pubsub.go +++ b/plugins/inputs/cloud_pubsub/cloud_pubsub.go @@ -152,7 +152,7 @@ func (ps *PubSub) Start(ac telegraf.Accumulator) error { } // Gather does nothing for this service input. -func (ps *PubSub) Gather(_ telegraf.Accumulator) error { +func (*PubSub) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go index e745ee57eeb2c..d446d04e991bc 100644 --- a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go @@ -133,7 +133,7 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error { return nil } -func (p *PubSubPush) Gather(_ telegraf.Accumulator) error { +func (*PubSubPush) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go index 9e8aa07d1f3db..06d91190f97bc 100644 --- a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go @@ -219,7 +219,7 @@ func TestServeHTTP(t *testing.T) { type testMetricMaker struct{} -func (tm *testMetricMaker) Name() string { +func (*testMetricMaker) Name() string { return "TestPlugin" } @@ -227,11 +227,11 @@ func (tm *testMetricMaker) LogName() string { return tm.Name() } -func (tm *testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { +func (*testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { return metric } -func (tm *testMetricMaker) Log() telegraf.Logger { +func (*testMetricMaker) Log() telegraf.Logger { return logger.New("test", "test", "") } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 4cf11fe5955db..602da9b460c5f 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ 
b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -21,7 +21,7 @@ import ( type mockGatherCloudWatchClient struct{} -func (m *mockGatherCloudWatchClient) ListMetrics( +func (*mockGatherCloudWatchClient) ListMetrics( _ context.Context, params *cloudwatch.ListMetricsInput, _ ...func(*cloudwatch.Options), @@ -56,7 +56,7 @@ func (m *mockGatherCloudWatchClient) ListMetrics( return response, nil } -func (m *mockGatherCloudWatchClient) GetMetricData( +func (*mockGatherCloudWatchClient) GetMetricData( _ context.Context, params *cloudwatch.GetMetricDataInput, _ ...func(*cloudwatch.Options), @@ -307,10 +307,10 @@ func TestGather_MultipleNamespaces(t *testing.T) { type mockSelectMetricsCloudWatchClient struct{} -func (m *mockSelectMetricsCloudWatchClient) ListMetrics( - _ context.Context, - _ *cloudwatch.ListMetricsInput, - _ ...func(*cloudwatch.Options), +func (*mockSelectMetricsCloudWatchClient) ListMetrics( + context.Context, + *cloudwatch.ListMetricsInput, + ...func(*cloudwatch.Options), ) (*cloudwatch.ListMetricsOutput, error) { metrics := make([]types.Metric, 0) // 4 metrics are available @@ -358,10 +358,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics( return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricData( - _ context.Context, - _ *cloudwatch.GetMetricDataInput, - _ ...func(*cloudwatch.Options), +func (*mockSelectMetricsCloudWatchClient) GetMetricData( + context.Context, + *cloudwatch.GetMetricDataInput, + ...func(*cloudwatch.Options), ) (*cloudwatch.GetMetricDataOutput, error) { return nil, nil } diff --git a/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go index a453932859139..6e824f50634a1 100644 --- a/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go +++ b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go @@ -149,7 +149,7 @@ func (cms *CloudWatchMetricStreams) Start(acc telegraf.Accumulator) error 
{ return nil } -func (cms *CloudWatchMetricStreams) Gather(_ telegraf.Accumulator) error { +func (*CloudWatchMetricStreams) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index f24bd795621f3..d0917df11c039 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -207,43 +207,43 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri fields := make(map[string]interface{}, 31) // CouchDB meta stats: - c.generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses) - c.generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites) - c.generateFields(fields, "couchdb_open_databases", stats.Couchdb.OpenDatabases) - c.generateFields(fields, "couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits) - c.generateFields(fields, "couchdb_request_time", requestTime) - c.generateFields(fields, "couchdb_database_reads", stats.Couchdb.DatabaseReads) - c.generateFields(fields, "couchdb_open_os_files", stats.Couchdb.OpenOsFiles) + generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses) + generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites) + generateFields(fields, "couchdb_open_databases", stats.Couchdb.OpenDatabases) + generateFields(fields, "couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits) + generateFields(fields, "couchdb_request_time", requestTime) + generateFields(fields, "couchdb_database_reads", stats.Couchdb.DatabaseReads) + generateFields(fields, "couchdb_open_os_files", stats.Couchdb.OpenOsFiles) // http request methods stats: - c.generateFields(fields, "httpd_request_methods_put", httpdRequestMethodsPut) - c.generateFields(fields, "httpd_request_methods_get", httpdRequestMethodsGet) - c.generateFields(fields, "httpd_request_methods_copy", httpdRequestMethodsCopy) - c.generateFields(fields, "httpd_request_methods_delete", 
httpdRequestMethodsDelete) - c.generateFields(fields, "httpd_request_methods_post", httpdRequestMethodsPost) - c.generateFields(fields, "httpd_request_methods_head", httpdRequestMethodsHead) + generateFields(fields, "httpd_request_methods_put", httpdRequestMethodsPut) + generateFields(fields, "httpd_request_methods_get", httpdRequestMethodsGet) + generateFields(fields, "httpd_request_methods_copy", httpdRequestMethodsCopy) + generateFields(fields, "httpd_request_methods_delete", httpdRequestMethodsDelete) + generateFields(fields, "httpd_request_methods_post", httpdRequestMethodsPost) + generateFields(fields, "httpd_request_methods_head", httpdRequestMethodsHead) // status code stats: - c.generateFields(fields, "httpd_status_codes_200", httpdStatusCodesStatus200) - c.generateFields(fields, "httpd_status_codes_201", httpdStatusCodesStatus201) - c.generateFields(fields, "httpd_status_codes_202", httpdStatusCodesStatus202) - c.generateFields(fields, "httpd_status_codes_301", httpdStatusCodesStatus301) - c.generateFields(fields, "httpd_status_codes_304", httpdStatusCodesStatus304) - c.generateFields(fields, "httpd_status_codes_400", httpdStatusCodesStatus400) - c.generateFields(fields, "httpd_status_codes_401", httpdStatusCodesStatus401) - c.generateFields(fields, "httpd_status_codes_403", httpdStatusCodesStatus403) - c.generateFields(fields, "httpd_status_codes_404", httpdStatusCodesStatus404) - c.generateFields(fields, "httpd_status_codes_405", httpdStatusCodesStatus405) - c.generateFields(fields, "httpd_status_codes_409", httpdStatusCodesStatus409) - c.generateFields(fields, "httpd_status_codes_412", httpdStatusCodesStatus412) - c.generateFields(fields, "httpd_status_codes_500", httpdStatusCodesStatus500) + generateFields(fields, "httpd_status_codes_200", httpdStatusCodesStatus200) + generateFields(fields, "httpd_status_codes_201", httpdStatusCodesStatus201) + generateFields(fields, "httpd_status_codes_202", httpdStatusCodesStatus202) + generateFields(fields, 
"httpd_status_codes_301", httpdStatusCodesStatus301) + generateFields(fields, "httpd_status_codes_304", httpdStatusCodesStatus304) + generateFields(fields, "httpd_status_codes_400", httpdStatusCodesStatus400) + generateFields(fields, "httpd_status_codes_401", httpdStatusCodesStatus401) + generateFields(fields, "httpd_status_codes_403", httpdStatusCodesStatus403) + generateFields(fields, "httpd_status_codes_404", httpdStatusCodesStatus404) + generateFields(fields, "httpd_status_codes_405", httpdStatusCodesStatus405) + generateFields(fields, "httpd_status_codes_409", httpdStatusCodesStatus409) + generateFields(fields, "httpd_status_codes_412", httpdStatusCodesStatus412) + generateFields(fields, "httpd_status_codes_500", httpdStatusCodesStatus500) // httpd stats: - c.generateFields(fields, "httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges) - c.generateFields(fields, "httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads) - c.generateFields(fields, "httpd_requests", stats.Httpd.Requests) - c.generateFields(fields, "httpd_bulk_requests", stats.Httpd.BulkRequests) - c.generateFields(fields, "httpd_view_reads", stats.Httpd.ViewReads) + generateFields(fields, "httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges) + generateFields(fields, "httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads) + generateFields(fields, "httpd_requests", stats.Httpd.Requests) + generateFields(fields, "httpd_bulk_requests", stats.Httpd.BulkRequests) + generateFields(fields, "httpd_view_reads", stats.Httpd.ViewReads) tags := map[string]string{ "server": host, @@ -252,7 +252,7 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri return nil } -func (c *CouchDB) generateFields(fields map[string]interface{}, prefix string, obj metaData) { +func generateFields(fields map[string]interface{}, prefix string, obj metaData) { if obj.Value != nil { fields[prefix+"_value"] = *obj.Value } diff --git 
a/plugins/inputs/csgo/csgo.go b/plugins/inputs/csgo/csgo.go index ed91a39ceeb0a..718139c53822d 100644 --- a/plugins/inputs/csgo/csgo.go +++ b/plugins/inputs/csgo/csgo.go @@ -61,7 +61,7 @@ func (s *CSGO) Gather(acc telegraf.Accumulator) error { } // Generate the metric and add it to the accumulator - m, err := s.parseResponse(addr, response, t) + m, err := parseResponse(addr, response, t) if err != nil { acc.AddError(err) return @@ -74,7 +74,7 @@ func (s *CSGO) Gather(acc telegraf.Accumulator) error { return nil } -func (s *CSGO) parseResponse(addr, response string, t time.Time) (telegraf.Metric, error) { +func parseResponse(addr, response string, t time.Time) (telegraf.Metric, error) { rows := strings.Split(response, "\n") if len(rows) < 2 { return nil, errors.New("bad response") diff --git a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go index c6c4791597e39..b09cb4398b083 100644 --- a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go +++ b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go @@ -131,7 +131,7 @@ func (c *CtrlXDataLayer) Start(acc telegraf.Accumulator) error { return nil } -func (c *CtrlXDataLayer) Gather(_ telegraf.Accumulator) error { +func (*CtrlXDataLayer) Gather(telegraf.Accumulator) error { // Metrics are sent to the accumulator asynchronously in worker thread. So nothing to do here. 
return nil } diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index 1b1af7d818e69..d89a023d0798f 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -133,7 +133,7 @@ func (c *clusterClient) setToken(token string) { } func (c *clusterClient) login(ctx context.Context, sa *serviceAccount) (*authToken, error) { - token, err := c.createLoginToken(sa) + token, err := createLoginToken(sa) if err != nil { return nil, err } @@ -316,7 +316,7 @@ func (c *clusterClient) toURL(path string) string { return clusterURL.String() } -func (c *clusterClient) createLoginToken(sa *serviceAccount) (string, error) { +func createLoginToken(sa *serviceAccount) (string, error) { token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{ UID: sa.accountID, RegisteredClaims: jwt.RegisteredClaims{ diff --git a/plugins/inputs/dcos/creds.go b/plugins/inputs/dcos/creds.go index 411c3c7329174..8b195bc98ba94 100644 --- a/plugins/inputs/dcos/creds.go +++ b/plugins/inputs/dcos/creds.go @@ -59,14 +59,14 @@ func (c *tokenCreds) token(_ context.Context, _ client) (string, error) { return token, nil } -func (c *tokenCreds) isExpired() bool { +func (*tokenCreds) isExpired() bool { return true } -func (c *nullCreds) token(_ context.Context, _ client) (string, error) { +func (*nullCreds) token(context.Context, client) (string, error) { return "", nil } -func (c *nullCreds) isExpired() bool { +func (*nullCreds) isExpired() bool { return true } diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 1b099fb3b1ef4..098954b177fcc 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -131,7 +131,7 @@ func (d *DCOS) gatherNode(ctx context.Context, acc telegraf.Accumulator, cluster acc.AddError(err) return } - d.addNodeMetrics(acc, cluster, m) + addNodeMetrics(acc, cluster, m) }() d.gatherContainers(ctx, acc, cluster, node) @@ -160,7 +160,7 @@ func (d *DCOS) gatherContainers(ctx context.Context, acc 
telegraf.Accumulator, c acc.AddError(err) return } - d.addContainerMetrics(acc, cluster, m) + addContainerMetrics(acc, cluster, m) }(container.ID) } @@ -177,14 +177,14 @@ func (d *DCOS) gatherContainers(ctx context.Context, acc telegraf.Accumulator, c acc.AddError(err) return } - d.addAppMetrics(acc, cluster, m) + addAppMetrics(acc, cluster, m) }(container.ID) } } wg.Wait() } -func (d *DCOS) createPoints(m *metrics) []*point { +func createPoints(m *metrics) []*point { points := make(map[string]*point) for _, dp := range m.Datapoints { fieldKey := strings.ReplaceAll(dp.Name, ".", "_") @@ -244,10 +244,10 @@ func (d *DCOS) createPoints(m *metrics) []*point { return results } -func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *metrics, tagDimensions []string) { +func addMetrics(acc telegraf.Accumulator, cluster, mname string, m *metrics, tagDimensions []string) { tm := time.Now() - points := d.createPoints(m) + points := createPoints(m) for _, p := range points { tags := make(map[string]string) @@ -266,16 +266,16 @@ func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *me } } -func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { - d.addMetrics(acc, cluster, "dcos_node", m, nodeDimensions) +func addNodeMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { + addMetrics(acc, cluster, "dcos_node", m, nodeDimensions) } -func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { - d.addMetrics(acc, cluster, "dcos_container", m, containerDimensions) +func addContainerMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { + addMetrics(acc, cluster, "dcos_container", m, containerDimensions) } -func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { - d.addMetrics(acc, cluster, "dcos_app", m, appDimensions) +func addAppMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { + addMetrics(acc, cluster, 
"dcos_app", m, appDimensions) } func (d *DCOS) initialize() error { diff --git a/plugins/inputs/dcos/dcos_test.go b/plugins/inputs/dcos/dcos_test.go index 4915b73c6f8d7..60e32e0f0f6a0 100644 --- a/plugins/inputs/dcos/dcos_test.go +++ b/plugins/inputs/dcos/dcos_test.go @@ -196,8 +196,7 @@ func TestAddNodeMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - dcos := &DCOS{} - dcos.addNodeMetrics(&acc, "a", tt.metrics) + addNodeMetrics(&acc, "a", tt.metrics) for i, ok := range tt.check(&acc) { require.Truef(t, ok, "Index was not true: %d", i) } @@ -267,8 +266,7 @@ func TestAddContainerMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - dcos := &DCOS{} - dcos.addContainerMetrics(&acc, "a", tt.metrics) + addContainerMetrics(&acc, "a", tt.metrics) for i, ok := range tt.check(&acc) { require.Truef(t, ok, "Index was not true: %d", i) } @@ -341,8 +339,7 @@ func TestAddAppMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - dcos := &DCOS{} - dcos.addAppMetrics(&acc, "a", tt.metrics) + addAppMetrics(&acc, "a", tt.metrics) for i, ok := range tt.check(&acc) { require.Truef(t, ok, "Index was not true: %d", i) } diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index abc0e489e01d5..8e6eb1ee0c85a 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -128,7 +128,7 @@ func (d *DockerLogs) Init() error { } // Start is a noop which is required for a *DockerLogs to implement the telegraf.ServiceInput interface -func (d *DockerLogs) Start(telegraf.Accumulator) error { +func (*DockerLogs) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go index 95dedd43bf42c..ff3b0b808ebf8 100644 --- 
a/plugins/inputs/docker_log/docker_log_test.go +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -40,7 +40,7 @@ type response struct { io.Reader } -func (r *response) Close() error { +func (*response) Close() error { return nil } diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index f1564327507b8..ea4864f04ca3b 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -56,7 +56,7 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(s string, f string) { defer wg.Done() - acc.AddError(d.gatherServer(s, acc, d.Type, f)) + acc.AddError(gatherServer(s, acc, d.Type, f)) }(server, filter) } } @@ -65,7 +65,7 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { return nil } -func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype, filter string) error { +func gatherServer(addr string, acc telegraf.Accumulator, qtype, filter string) error { var proto string if strings.HasPrefix(addr, "/") { diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index b537c1fdc09c4..712ca3d439df1 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -68,7 +68,7 @@ func (ecs *Ecs) Gather(acc telegraf.Accumulator) error { } // accumulate metrics - ecs.accTask(task, taskTags, acc) + accTask(task, taskTags, acc) ecs.accContainers(task, taskTags, acc) return nil @@ -137,7 +137,7 @@ func resolveEndpoint(ecs *Ecs) { ecs.metadataVersion = 2 } -func (ecs *Ecs) accTask(task *ecsTask, tags map[string]string, acc telegraf.Accumulator) { +func accTask(task *ecsTask, tags map[string]string, acc telegraf.Accumulator) { taskFields := map[string]interface{}{ "desired_status": task.DesiredStatus, "known_status": task.KnownStatus, diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 3cbc7fd2e3b48..b8f51dbc29502 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ 
b/plugins/inputs/elasticsearch/elasticsearch.go @@ -159,7 +159,7 @@ func (e *Elasticsearch) Init() error { return nil } -func (e *Elasticsearch) Start(_ telegraf.Accumulator) error { +func (*Elasticsearch) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go index 0e06b4b049489..525a9a061d9d7 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -89,7 +89,7 @@ func (e *ElasticsearchQuery) Init() error { return nil } -func (e *ElasticsearchQuery) Start(_ telegraf.Accumulator) error { +func (*ElasticsearchQuery) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index 4629a8500066b..0116fc3d94e9a 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -269,15 +269,15 @@ func (c *commandEthtool) init() error { return nil } -func (c *commandEthtool) driverName(intf namespacedInterface) (driver string, err error) { +func (*commandEthtool) driverName(intf namespacedInterface) (driver string, err error) { return intf.namespace.driverName(intf) } -func (c *commandEthtool) stats(intf namespacedInterface) (stats map[string]uint64, err error) { +func (*commandEthtool) stats(intf namespacedInterface) (stats map[string]uint64, err error) { return intf.namespace.stats(intf) } -func (c *commandEthtool) get(intf namespacedInterface) (stats map[string]uint64, err error) { +func (*commandEthtool) get(intf namespacedInterface) (stats map[string]uint64, err error) { return intf.namespace.get(intf) } diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index 0088a3f3c93de..64e0c848bf789 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -35,19 +35,19 @@ 
func (n *namespaceMock) name() string { return n.namespaceName } -func (n *namespaceMock) interfaces() ([]namespacedInterface, error) { +func (*namespaceMock) interfaces() ([]namespacedInterface, error) { return nil, errors.New("it is a test bug to invoke this function") } -func (n *namespaceMock) driverName(_ namespacedInterface) (string, error) { +func (*namespaceMock) driverName(_ namespacedInterface) (string, error) { return "", errors.New("it is a test bug to invoke this function") } -func (n *namespaceMock) stats(_ namespacedInterface) (map[string]uint64, error) { +func (*namespaceMock) stats(_ namespacedInterface) (map[string]uint64, error) { return nil, errors.New("it is a test bug to invoke this function") } -func (n *namespaceMock) get(_ namespacedInterface) (map[string]uint64, error) { +func (*namespaceMock) get(_ namespacedInterface) (map[string]uint64, error) { return nil, errors.New("it is a test bug to invoke this function") } @@ -55,7 +55,7 @@ type commandEthtoolMock struct { interfaceMap map[string]*interfaceMock } -func (c *commandEthtoolMock) init() error { +func (*commandEthtoolMock) init() error { // Not required for test mock return nil } diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index efe8c29687a29..66dc7eea0f87b 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -59,7 +59,7 @@ func (*Exec) SampleConfig() string { return sampleConfig } -func (e *Exec) Init() error { +func (*Exec) Init() error { return nil } @@ -121,7 +121,7 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error { return nil } -func (c commandRunner) truncate(buf bytes.Buffer) bytes.Buffer { +func truncate(buf bytes.Buffer) bytes.Buffer { // Limit the number of bytes. 
didTruncate := false if buf.Len() > maxStderrBytes { diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index def86cb1c9897..eb605f8fd0cde 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -302,10 +302,9 @@ func TestTruncate(t *testing.T) { }, } - c := commandRunner{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res := c.truncate(*tt.bufF()) + res := truncate(*tt.bufF()) require.Equal(t, tt.expF().Bytes(), res.Bytes()) }) } diff --git a/plugins/inputs/exec/run_notwinodws.go b/plugins/inputs/exec/run_notwindows.go similarity index 96% rename from plugins/inputs/exec/run_notwinodws.go rename to plugins/inputs/exec/run_notwindows.go index 0fdaf2e73eb37..fa346e590bf0e 100644 --- a/plugins/inputs/exec/run_notwinodws.go +++ b/plugins/inputs/exec/run_notwindows.go @@ -44,7 +44,7 @@ func (c commandRunner) run( out = removeWindowsCarriageReturns(out) if stderr.Len() > 0 && !c.debug { stderr = removeWindowsCarriageReturns(stderr) - stderr = c.truncate(stderr) + stderr = truncate(stderr) } return out.Bytes(), stderr.Bytes(), runErr diff --git a/plugins/inputs/exec/run_windows.go b/plugins/inputs/exec/run_windows.go index fad0160b3119a..f7acc7c5fb712 100644 --- a/plugins/inputs/exec/run_windows.go +++ b/plugins/inputs/exec/run_windows.go @@ -46,7 +46,7 @@ func (c commandRunner) run( out = removeWindowsCarriageReturns(out) if stderr.Len() > 0 && !c.debug { stderr = removeWindowsCarriageReturns(stderr) - stderr = c.truncate(stderr) + stderr = truncate(stderr) } return out.Bytes(), stderr.Bytes(), runErr diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index 6368e2d21746f..d4dfcf00c232f 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -362,7 +362,7 @@ func readChanWithTimeout(t *testing.T, metrics chan telegraf.Metric, timeout tim type TestMetricMaker struct{} -func (tm *TestMetricMaker) Name() string { +func 
(*TestMetricMaker) Name() string { return "TestPlugin" } @@ -370,11 +370,11 @@ func (tm *TestMetricMaker) LogName() string { return tm.Name() } -func (tm *TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric { +func (*TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric { return aMetric } -func (tm *TestMetricMaker) Log() telegraf.Logger { +func (*TestMetricMaker) Log() telegraf.Logger { return logger.New("TestPlugin", "test", "") } diff --git a/plugins/inputs/execd/shim/input.go b/plugins/inputs/execd/shim/input.go index cf100256fe0b3..0b4ddf30891fc 100644 --- a/plugins/inputs/execd/shim/input.go +++ b/plugins/inputs/execd/shim/input.go @@ -8,16 +8,16 @@ type inputShim struct { } // LogName satisfies the MetricMaker interface -func (i inputShim) LogName() string { +func (inputShim) LogName() string { return "" } // MakeMetric satisfies the MetricMaker interface -func (i inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric { +func (inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric { return m // don't need to do anything to it. 
} // Log satisfies the MetricMaker interface -func (i inputShim) Log() telegraf.Logger { +func (inputShim) Log() telegraf.Logger { return nil } diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index 63e073e5498fc..e3124ea74b353 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -85,11 +85,7 @@ type testInput struct { metricProcessed chan bool } -func (i *testInput) SampleConfig() string { - return "" -} - -func (i *testInput) Description() string { +func (*testInput) SampleConfig() string { return "" } @@ -105,11 +101,11 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *testInput) Start(_ telegraf.Accumulator) error { +func (*testInput) Start(telegraf.Accumulator) error { return nil } -func (i *testInput) Stop() { +func (*testInput) Stop() { } func TestLoadConfig(t *testing.T) { @@ -137,15 +133,11 @@ type serviceInput struct { SecretValue string `toml:"secret_value"` } -func (i *serviceInput) SampleConfig() string { - return "" -} - -func (i *serviceInput) Description() string { +func (*serviceInput) SampleConfig() string { return "" } -func (i *serviceInput) Gather(acc telegraf.Accumulator) error { +func (*serviceInput) Gather(acc telegraf.Accumulator) error { acc.AddFields("measurement", map[string]interface{}{ "field": 1, @@ -157,11 +149,11 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *serviceInput) Start(_ telegraf.Accumulator) error { +func (*serviceInput) Start(telegraf.Accumulator) error { return nil } -func (i *serviceInput) Stop() { +func (*serviceInput) Stop() { } // we can get stuck if stdout gets clogged up and nobody's reading from it. 
From 828f38e69c5abec0d7e3c3f95ffb6ba6e6cebd13 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Mon, 16 Dec 2024 10:15:24 +0800 Subject: [PATCH 083/170] feat(outputs): Fix go mod error --- go.sum | 8 -------- 1 file changed, 8 deletions(-) diff --git a/go.sum b/go.sum index 5c3e20e416695..4361d7679c2f9 100644 --- a/go.sum +++ b/go.sum @@ -2561,8 +2561,6 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2906,8 +2904,6 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2928,8 +2924,6 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term 
v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2952,8 +2946,6 @@ golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From e74b0044dd2ae96a61a352b7e881253b729dbdab Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Mon, 16 Dec 2024 14:17:45 +0800 Subject: [PATCH 084/170] Update inlong_test.go --- plugins/outputs/inlong/inlong_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index be0bef2462c70..ef85b5d0a0b75 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -52,6 +52,7 @@ func TestInlong_Write(t *testing.T) { producer: producer, serializer: s, } + m := metric.New( 
"cpu", map[string]string{ From ba886846eb2fcc6c8851d4683c86754137d65134 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Mon, 16 Dec 2024 14:21:35 +0800 Subject: [PATCH 085/170] Update inlong_test.go --- plugins/outputs/inlong/inlong_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index ef85b5d0a0b75..be0bef2462c70 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -52,7 +52,6 @@ func TestInlong_Write(t *testing.T) { producer: producer, serializer: s, } - m := metric.New( "cpu", map[string]string{ From c0b3dd489e370e92aa399c42f0fca3e24b71e49d Mon Sep 17 00:00:00 2001 From: Sergio <239811+zomfg@users.noreply.github.com> Date: Mon, 16 Dec 2024 17:06:22 +0100 Subject: [PATCH 086/170] feat(inputs.docker): Support swarm jobs (#16292) --- plugins/inputs/docker/docker.go | 12 +++++++++++ plugins/inputs/docker/docker_test.go | 26 ++++++++++++++++++++++++ plugins/inputs/docker/docker_testdata.go | 25 +++++++++++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 0f5471eb11840..942c79d7dd8ce 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -309,6 +309,18 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { tags["service_mode"] = "global" fields["tasks_running"] = running[service.ID] fields["tasks_desired"] = tasksNoShutdown[service.ID] + } else if service.Spec.Mode.ReplicatedJob != nil { + tags["service_mode"] = "replicated_job" + fields["tasks_running"] = running[service.ID] + if service.Spec.Mode.ReplicatedJob.MaxConcurrent != nil { + fields["max_concurrent"] = *service.Spec.Mode.ReplicatedJob.MaxConcurrent + } + if service.Spec.Mode.ReplicatedJob.TotalCompletions != nil { + fields["total_completions"] = *service.Spec.Mode.ReplicatedJob.TotalCompletions + } + } else if service.Spec.Mode.GlobalJob != nil { + 
tags["service_mode"] = "global_job" + fields["tasks_running"] = running[service.ID] } else { d.Log.Error("Unknown replica mode") } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 600227e5b1653..24d2c59469267 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -1102,6 +1102,32 @@ func TestDockerGatherSwarmInfo(t *testing.T) { "service_mode": "global", }, ) + + acc.AssertContainsTaggedFields(t, + "docker_swarm", + map[string]interface{}{ + "tasks_running": int(0), + "max_concurrent": uint64(2), + "total_completions": uint64(2), + }, + map[string]string{ + "service_id": "rfmqydhe8cluzl9hayyrhw5ga", + "service_name": "test3", + "service_mode": "replicated_job", + }, + ) + + acc.AssertContainsTaggedFields(t, + "docker_swarm", + map[string]interface{}{ + "tasks_running": int(0), + }, + map[string]string{ + "service_id": "mp50lo68vqgkory4e26ts8f9d", + "service_name": "test4", + "service_mode": "global_job", + }, + ) } func TestContainerStateFilter(t *testing.T) { diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index 57be5a8cb1773..e0b5cb6f6cda0 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -196,6 +196,31 @@ var serviceList = []swarm.Service{ }, }, }, + { + ID: "rfmqydhe8cluzl9hayyrhw5ga", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "test3", + }, + Mode: swarm.ServiceMode{ + ReplicatedJob: &swarm.ReplicatedJob{ + MaxConcurrent: &two, + TotalCompletions: &two, + }, + }, + }, + }, + { + ID: "mp50lo68vqgkory4e26ts8f9d", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "test4", + }, + Mode: swarm.ServiceMode{ + GlobalJob: &swarm.GlobalJob{}, + }, + }, + }, } var taskList = []swarm.Task{ From 6f80899e13d461239c1612f9cc297b82d77d1eb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 17 Dec 2024 15:33:30 +0100 
Subject: [PATCH 087/170] chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[f-k]` (#16308) --- .../filesystem_helpers_notwindows.go | 2 +- plugins/inputs/fireboard/fireboard.go | 4 +-- plugins/inputs/gnmi/gnmi.go | 2 +- plugins/inputs/gnmi/gnmi_test.go | 6 ++--- plugins/inputs/gnmi/tag_store.go | 6 ++--- .../google_cloud_storage.go | 2 +- plugins/inputs/graylog/graylog_test.go | 4 +-- plugins/inputs/haproxy/haproxy_test.go | 10 +++----- plugins/inputs/hddtemp/go-hddtemp/hddtemp.go | 2 +- plugins/inputs/hddtemp/hddtemp_test.go | 2 +- plugins/inputs/http/http.go | 2 +- .../http_listener_v2/http_listener_v2.go | 2 +- plugins/inputs/hugepages/hugepages.go | 6 ++--- plugins/inputs/icinga2/icinga2.go | 16 ++++++------ plugins/inputs/infiniband/infiniband_linux.go | 2 +- .../influxdb_listener/influxdb_listener.go | 2 +- .../influxdb_v2_listener.go | 2 +- .../inputs/intel_baseband/intel_baseband.go | 2 +- .../inputs/intel_baseband/log_connector.go | 8 +++--- .../intel_baseband/log_connector_test.go | 4 +-- plugins/inputs/intel_dlb/intel_dlb.go | 2 +- plugins/inputs/intel_pmt/intel_pmt.go | 2 +- plugins/inputs/intel_pmu/intel_pmu_test.go | 12 ++++----- plugins/inputs/intel_powerstat/options.go | 2 +- plugins/inputs/intel_rdt/intel_rdt.go | 2 +- plugins/inputs/intel_rdt/intel_rdt_test.go | 2 +- plugins/inputs/intel_rdt/processes.go | 2 +- plugins/inputs/ipmi_sensor/ipmi_sensor.go | 4 +-- .../inputs/ipmi_sensor/ipmi_sensor_test.go | 6 +---- plugins/inputs/ipset/ipset.go | 2 +- .../jti_openconfig_telemetry.go | 2 +- .../jti_openconfig_telemetry_test.go | 25 ++++++++----------- .../inputs/kafka_consumer/kafka_consumer.go | 2 +- .../kafka_consumer/kafka_consumer_test.go | 22 ++++++++-------- plugins/inputs/kernel/kernel.go | 14 +++++------ plugins/inputs/kernel/kernel_test.go | 14 ++--------- plugins/inputs/kibana/kibana.go | 2 +- plugins/inputs/kube_inventory/certificate.go | 4 +-- plugins/inputs/kube_inventory/endpoint.go | 4 +-- 
.../inputs/kube_inventory/endpoint_test.go | 7 +----- plugins/inputs/kube_inventory/ingress.go | 4 +-- plugins/inputs/kube_inventory/ingress_test.go | 7 +----- plugins/inputs/kube_inventory/node.go | 4 +-- plugins/inputs/kube_inventory/node_test.go | 2 +- .../inputs/kube_inventory/persistentvolume.go | 4 +-- .../kube_inventory/persistentvolume_test.go | 6 +---- 46 files changed, 104 insertions(+), 141 deletions(-) diff --git a/plugins/inputs/filecount/filesystem_helpers_notwindows.go b/plugins/inputs/filecount/filesystem_helpers_notwindows.go index e1a11c78ec2a4..7f4c6e2ced1e8 100644 --- a/plugins/inputs/filecount/filesystem_helpers_notwindows.go +++ b/plugins/inputs/filecount/filesystem_helpers_notwindows.go @@ -40,7 +40,7 @@ func (f fakeFileInfo) ModTime() time.Time { return f.modtime } func (f fakeFileInfo) IsDir() bool { return f.isdir } func (f fakeFileInfo) Sys() interface{} { return f.sys } -func (f fakeFileSystem) open(name string) (file, error) { +func (fakeFileSystem) open(name string) (file, error) { return nil, &os.PathError{Op: "Open", Path: name, Err: errors.New("not implemented by fake filesystem")} } diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go index bf45fbe53172c..07398b66f9281 100644 --- a/plugins/inputs/fireboard/fireboard.go +++ b/plugins/inputs/fireboard/fireboard.go @@ -87,7 +87,7 @@ func (r *Fireboard) Gather(acc telegraf.Accumulator) error { } // Range over all devices, gathering stats. Returns early in case of any error. 
for _, s := range stats { - r.gatherTemps(s, acc) + gatherTemps(s, acc) } return nil } @@ -105,7 +105,7 @@ func scale(n int) string { } // Gathers stats from a single device, adding them to the accumulator -func (r *Fireboard) gatherTemps(s fireboardStats, acc telegraf.Accumulator) { +func gatherTemps(s fireboardStats, acc telegraf.Accumulator) { // Construct lookup for scale values for _, t := range s.LatestTemps { diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 3832669b8110d..dcf7101ff10e0 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -314,7 +314,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { return nil } -func (c *GNMI) Gather(_ telegraf.Accumulator) error { +func (*GNMI) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index 4f0feae0fcfc1..f9af63369027a 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -51,15 +51,15 @@ type mockServer struct { grpcServer *grpc.Server } -func (s *mockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { +func (*mockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { return nil, nil } -func (s *mockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { +func (*mockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { return nil, nil } -func (s *mockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { +func (*mockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { return nil, nil } diff --git a/plugins/inputs/gnmi/tag_store.go b/plugins/inputs/gnmi/tag_store.go index af6b2b55f2bcf..1ab48bfd50c10 100644 --- a/plugins/inputs/gnmi/tag_store.go +++ b/plugins/inputs/gnmi/tag_store.go @@ -89,7 +89,7 @@ func (s *tagStore) insert(subscription tagSubscription, path 
*pathInfo, values [ } } case "elements": - key, match := s.getElementsKeys(path, subscription.Elements) + key, match := getElementsKeys(path, subscription.Elements) if !match || len(values) == 0 { return nil } @@ -141,7 +141,7 @@ func (s *tagStore) lookup(path *pathInfo, metricTags map[string]string) map[stri // Match elements for _, requiredKeys := range s.elements.required { - key, match := s.getElementsKeys(path, requiredKeys) + key, match := getElementsKeys(path, requiredKeys) if !match { continue } @@ -153,7 +153,7 @@ func (s *tagStore) lookup(path *pathInfo, metricTags map[string]string) map[stri return tags } -func (s *tagStore) getElementsKeys(path *pathInfo, elements []string) (string, bool) { +func getElementsKeys(path *pathInfo, elements []string) (string, bool) { // Search for the required path elements and collect a ordered // list of their values to in the form // elementName1={keyA=valueA,keyB=valueB,...},...,elementNameN={keyY=valueY,keyZ=valueZ} diff --git a/plugins/inputs/google_cloud_storage/google_cloud_storage.go b/plugins/inputs/google_cloud_storage/google_cloud_storage.go index 920e66b8e0642..2c45552aeb1da 100644 --- a/plugins/inputs/google_cloud_storage/google_cloud_storage.go +++ b/plugins/inputs/google_cloud_storage/google_cloud_storage.go @@ -57,7 +57,7 @@ func (gcs *GCS) Init() error { return gcs.setOffset() } -func (gcs *GCS) SampleConfig() string { +func (*GCS) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index 0662dc058566b..ecbb40b03cc3a 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -119,10 +119,10 @@ func (c *mockHTTPClient) makeRequest(req *http.Request) (*http.Response, error) return &resp, nil } -func (c *mockHTTPClient) setHTTPClient(_ *http.Client) { +func (*mockHTTPClient) setHTTPClient(*http.Client) { } -func (c *mockHTTPClient) httpClient() *http.Client { +func (*mockHTTPClient) 
httpClient() *http.Client { return nil } diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index 884dcdc8dc76f..ec34e817bb87f 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -17,9 +17,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -type statServer struct{} - -func (s statServer) serverSocket(l net.Listener) { +func serverSocket(l net.Listener) { for { conn, err := l.Accept() if err != nil { @@ -151,8 +149,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { sockets[i] = sock defer sock.Close() //nolint:revive,gocritic // done on purpose, closing will be executed properly - s := statServer{} - go s.serverSocket(sock) + go serverSocket(sock) } r := &HAProxy{ @@ -191,8 +188,7 @@ func TestHaproxyGeneratesMetricsUsingTcp(t *testing.T) { } defer l.Close() - s := statServer{} - go s.serverSocket(l) + go serverSocket(l) r := &HAProxy{ Servers: []string{"tcp://" + l.Addr().String()}, diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go index 7c58cfbea321b..1e511b5e9bb28 100644 --- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go @@ -25,7 +25,7 @@ func New() *hddtemp { } // Fetch gathers disks data from hddtemp daemon. 
-func (h *hddtemp) Fetch(address string) ([]Disk, error) { +func (*hddtemp) Fetch(address string) ([]Disk, error) { var ( err error conn net.Conn diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index b266600a95682..f1dd99cf8df5f 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -12,7 +12,7 @@ import ( type mockFetcher struct { } -func (h *mockFetcher) Fetch(_ string) ([]hddtemp.Disk, error) { +func (*mockFetcher) Fetch(string) ([]hddtemp.Disk, error) { return []hddtemp.Disk{ { DeviceName: "Disk1", diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 5cf8eb5919af1..5557d5bd108d5 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -86,7 +86,7 @@ func (h *HTTP) SetParserFunc(fn telegraf.ParserFunc) { h.parserFunc = fn } -func (h *HTTP) Start(_ telegraf.Accumulator) error { +func (*HTTP) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 825da44535801..e940620fb2eed 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -197,7 +197,7 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { return nil } -func (h *HTTPListenerV2) Gather(_ telegraf.Accumulator) error { +func (*HTTPListenerV2) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/hugepages/hugepages.go b/plugins/inputs/hugepages/hugepages.go index fb7a719179ef1..ebd31845816aa 100644 --- a/plugins/inputs/hugepages/hugepages.go +++ b/plugins/inputs/hugepages/hugepages.go @@ -118,7 +118,7 @@ func (h *Hugepages) Gather(acc telegraf.Accumulator) error { // gatherStatsPerNode collects root hugepages statistics func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error { - return h.gatherFromHugepagePath(acc, "hugepages_"+rootHugepages, 
h.rootHugepagePath, hugepagesMetricsRoot, nil) + return gatherFromHugepagePath(acc, "hugepages_"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil) } // gatherStatsPerNode collects hugepages statistics per NUMA node @@ -144,7 +144,7 @@ func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error { "node": nodeNumber, } hugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), "hugepages") - err = h.gatherFromHugepagePath(acc, "hugepages_"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags) + err = gatherFromHugepagePath(acc, "hugepages_"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags) if err != nil { return err } @@ -152,7 +152,7 @@ func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error { return nil } -func (h *Hugepages) gatherFromHugepagePath(acc telegraf.Accumulator, measurement, path string, fileFilter, defaultTags map[string]string) error { +func gatherFromHugepagePath(acc telegraf.Accumulator, measurement, path string, fileFilter, defaultTags map[string]string) error { // read metrics from: hugepages/hugepages-*/* hugepagesDirs, err := os.ReadDir(path) if err != nil { diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 7f85b3c0f01b9..a0aa1bca47a6b 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -121,7 +121,7 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { } result := resultObject{} - err = i.parseObjectResponse(resp, &result) + err = parseObjectResponse(resp, &result) if err != nil { return fmt.Errorf("could not parse object response: %w", err) } @@ -145,13 +145,13 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { switch statusType { case "ApiListener": - fields, err = i.parsePerfdataResponse(resp) + fields, err = parsePerfdataResponse(resp) case "CIB": - fields, err = i.parseCIBResponse(resp) + fields, err = parseCIBResponse(resp) case "IdoMysqlConnection": - 
fields, err = i.parsePerfdataResponse(resp) + fields, err = parsePerfdataResponse(resp) case "IdoPgsqlConnection": - fields, err = i.parsePerfdataResponse(resp) + fields, err = parsePerfdataResponse(resp) } if err != nil { @@ -233,7 +233,7 @@ func (i *Icinga2) icingaRequest(address string) (*http.Response, error) { return resp, nil } -func (i *Icinga2) parseObjectResponse(resp *http.Response, result *resultObject) error { +func parseObjectResponse(resp *http.Response, result *resultObject) error { err := json.NewDecoder(resp.Body).Decode(&result) if err != nil { return err @@ -246,7 +246,7 @@ func (i *Icinga2) parseObjectResponse(resp *http.Response, result *resultObject) return nil } -func (i *Icinga2) parseCIBResponse(resp *http.Response) (map[string]interface{}, error) { +func parseCIBResponse(resp *http.Response) (map[string]interface{}, error) { result := resultCIB{} err := json.NewDecoder(resp.Body).Decode(&result) @@ -262,7 +262,7 @@ func (i *Icinga2) parseCIBResponse(resp *http.Response) (map[string]interface{}, return result.Results[0].Status, nil } -func (i *Icinga2) parsePerfdataResponse(resp *http.Response) (map[string]interface{}, error) { +func parsePerfdataResponse(resp *http.Response) (map[string]interface{}, error) { result := resultPerfdata{} err := json.NewDecoder(resp.Body).Decode(&result) diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go index 72bbc4714d763..214ba8e08fd67 100644 --- a/plugins/inputs/infiniband/infiniband_linux.go +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -12,7 +12,7 @@ import ( ) // Gather statistics from our infiniband cards -func (i *Infiniband) Gather(acc telegraf.Accumulator) error { +func (*Infiniband) Gather(acc telegraf.Accumulator) error { rdmaDevices := rdmamap.GetRdmaDeviceList() if len(rdmaDevices) == 0 { diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 
9186ecac8e54a..044b4c1050d26 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -76,7 +76,7 @@ func (*InfluxDBListener) SampleConfig() string { return sampleConfig } -func (h *InfluxDBListener) Gather(_ telegraf.Accumulator) error { +func (*InfluxDBListener) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index fb8a03de83949..52854d6f6e4d6 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -120,7 +120,7 @@ func (h *InfluxDBV2Listener) Init() error { return nil } -func (h *InfluxDBV2Listener) Gather(_ telegraf.Accumulator) error { +func (*InfluxDBV2Listener) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/intel_baseband/intel_baseband.go b/plugins/inputs/intel_baseband/intel_baseband.go index 4017cb74b8946..ae22b7acee11b 100644 --- a/plugins/inputs/intel_baseband/intel_baseband.go +++ b/plugins/inputs/intel_baseband/intel_baseband.go @@ -56,7 +56,7 @@ type Baseband struct { sockConn *socketConnector } -func (b *Baseband) SampleConfig() string { +func (*Baseband) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/intel_baseband/log_connector.go b/plugins/inputs/intel_baseband/log_connector.go index cfb67b57c1947..82c5367b5f1b1 100644 --- a/plugins/inputs/intel_baseband/log_connector.go +++ b/plugins/inputs/intel_baseband/log_connector.go @@ -135,7 +135,7 @@ func (lc *logConnector) readNumVFs() error { continue } - numVFs, err := lc.parseNumVFs(line) + numVFs, err := parseNumVFs(line) if err != nil { lc.numVFs = -1 return err @@ -189,7 +189,7 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric, return offsetLine, nil, err } - operationName := lc.parseOperationName(line) + 
operationName := parseOperationName(line) if len(operationName) == 0 { return offsetLine, nil, errors.New("valid operation name wasn't found in log") } @@ -221,7 +221,7 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric, } // Example value = Thu Apr 13 13:28:40 2023:INFO:Device Status:: 2 VFs -func (lc *logConnector) parseNumVFs(s string) (int, error) { +func parseNumVFs(s string) (int, error) { i := strings.LastIndex(s, deviceStatusStartPrefix) if i == -1 { return 0, errors.New("couldn't find device status prefix in line") @@ -244,7 +244,7 @@ func (lc *logConnector) parseNumVFs(s string) (int, error) { // Parse Operation name // Example = Thu Apr 13 13:28:40 2023:INFO:5GUL counters: Code Blocks // Output: 5GUL -func (lc *logConnector) parseOperationName(s string) string { +func parseOperationName(s string) string { i := strings.Index(s, infoLine) if i >= 0 { j := strings.Index(s[i:], countersLine) diff --git a/plugins/inputs/intel_baseband/log_connector_test.go b/plugins/inputs/intel_baseband/log_connector_test.go index 9d07e93ff754b..b2b286dab6d26 100644 --- a/plugins/inputs/intel_baseband/log_connector_test.go +++ b/plugins/inputs/intel_baseband/log_connector_test.go @@ -240,11 +240,9 @@ func TestParseOperationName(t *testing.T) { {"", ""}, } - logConnector := prepareLogConnMock() - require.NotNil(t, logConnector) for _, tc := range testCases { t.Run("expected "+tc.expected, func(t *testing.T) { - operationName := logConnector.parseOperationName(tc.input) + operationName := parseOperationName(tc.input) require.Equal(t, tc.expected, operationName) }) } diff --git a/plugins/inputs/intel_dlb/intel_dlb.go b/plugins/inputs/intel_dlb/intel_dlb.go index 643713ce1cba8..ddbe40c1adf58 100644 --- a/plugins/inputs/intel_dlb/intel_dlb.go +++ b/plugins/inputs/intel_dlb/intel_dlb.go @@ -50,7 +50,7 @@ type IntelDLB struct { maxInitMessageLength uint32 } -func (d *IntelDLB) SampleConfig() string { +func (*IntelDLB) SampleConfig() string { return 
sampleConfig } diff --git a/plugins/inputs/intel_pmt/intel_pmt.go b/plugins/inputs/intel_pmt/intel_pmt.go index f61980b7626e4..54e91613caefa 100644 --- a/plugins/inputs/intel_pmt/intel_pmt.go +++ b/plugins/inputs/intel_pmt/intel_pmt.go @@ -56,7 +56,7 @@ type fileInfo struct { pciBdf string // PCI Bus:Device.Function (BDF) } -func (p *IntelPMT) SampleConfig() string { +func (*IntelPMT) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/intel_pmu/intel_pmu_test.go b/plugins/inputs/intel_pmu/intel_pmu_test.go index 6c75f68f68378..2910c905ad100 100644 --- a/plugins/inputs/intel_pmu/intel_pmu_test.go +++ b/plugins/inputs/intel_pmu/intel_pmu_test.go @@ -547,9 +547,9 @@ type fakeFileInfo struct { fileMode os.FileMode } -func (f fakeFileInfo) Name() string { return "" } -func (f fakeFileInfo) Size() int64 { return 0 } -func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode } -func (f fakeFileInfo) ModTime() time.Time { return time.Time{} } -func (f fakeFileInfo) IsDir() bool { return false } -func (f fakeFileInfo) Sys() interface{} { return nil } +func (fakeFileInfo) Name() string { return "" } +func (fakeFileInfo) Size() int64 { return 0 } +func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode } +func (fakeFileInfo) ModTime() time.Time { return time.Time{} } +func (fakeFileInfo) IsDir() bool { return false } +func (fakeFileInfo) Sys() interface{} { return nil } diff --git a/plugins/inputs/intel_powerstat/options.go b/plugins/inputs/intel_powerstat/options.go index 7e422b26bc01f..5e4fd4df4ad72 100644 --- a/plugins/inputs/intel_powerstat/options.go +++ b/plugins/inputs/intel_powerstat/options.go @@ -33,7 +33,7 @@ type optGenerator struct{} // generate takes plugin configuration options and generates options needed // to gather requested metrics. 
-func (g *optGenerator) generate(cfg optConfig) []ptel.Option { +func (*optGenerator) generate(cfg optConfig) []ptel.Option { opts := make([]ptel.Option, 0) if len(cfg.includedCPUs) != 0 { opts = append(opts, ptel.WithIncludedCPUs(cfg.includedCPUs)) diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 1c0685e634bf7..d1427fbdb4f00 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -100,7 +100,7 @@ func (r *IntelRDT) Start(acc telegraf.Accumulator) error { return nil } -func (r *IntelRDT) Gather(_ telegraf.Accumulator) error { +func (*IntelRDT) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index e9468521276fb..7f4dc00919695 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -12,7 +12,7 @@ import ( type mockProc struct{} -func (m *mockProc) getAllProcesses() ([]process, error) { +func (*mockProc) getAllProcesses() ([]process, error) { procs := []process{ {Name: "process", PID: 1000}, {Name: "process2", PID: 1002}, diff --git a/plugins/inputs/intel_rdt/processes.go b/plugins/inputs/intel_rdt/processes.go index 63c8622aa1875..975760b0bfe40 100644 --- a/plugins/inputs/intel_rdt/processes.go +++ b/plugins/inputs/intel_rdt/processes.go @@ -19,7 +19,7 @@ func newProcessor() processesHandler { return &processManager{} } -func (p *processManager) getAllProcesses() ([]process, error) { +func (*processManager) getAllProcesses() ([]process, error) { allProcesses, err := procfs.AllProcs() if err != nil { return nil, err diff --git a/plugins/inputs/ipmi_sensor/ipmi_sensor.go b/plugins/inputs/ipmi_sensor/ipmi_sensor.go index b70de8d0e3c2b..403e7b3fe7a59 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_sensor.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor.go @@ -179,7 +179,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server, 
sensor string) error { return m.parseV1(acc, hostname, out, timestamp) } case "chassis_power_status": - return m.parseChassisPowerStatus(acc, hostname, out, timestamp) + return parseChassisPowerStatus(acc, hostname, out, timestamp) case "dcmi_power_reading": return m.parseDCMIPowerReading(acc, hostname, out, timestamp) } @@ -187,7 +187,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server, sensor string) error { return fmt.Errorf("unknown sensor type %q", sensor) } -func (m *Ipmi) parseChassisPowerStatus(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { +func parseChassisPowerStatus(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // Chassis Power is on // Chassis Power is off diff --git a/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go index 74dab5eb18b8f..03c0c316d28a4 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go @@ -820,14 +820,10 @@ func Test_parsePowerStatus(t *testing.T) { }, } - ipmi := &Ipmi{ - Log: testutil.Logger{}, - } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - err := ipmi.parseChassisPowerStatus(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt) + err := parseChassisPowerStatus(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt) require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) diff --git a/plugins/inputs/ipset/ipset.go b/plugins/inputs/ipset/ipset.go index 4a7e0938a31ff..be177d10a26ac 100644 --- a/plugins/inputs/ipset/ipset.go +++ b/plugins/inputs/ipset/ipset.go @@ -38,7 +38,7 @@ func (*Ipset) SampleConfig() string { return sampleConfig } -func (i *Ipset) Init() error { +func (*Ipset) Init() error { _, err := exec.LookPath("ipset") if err != nil { return err diff --git 
a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go index e60dda37245bb..b6c7d039515fa 100644 --- a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go @@ -172,7 +172,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { return nil } -func (m *OpenConfigTelemetry) Gather(_ telegraf.Accumulator) error { +func (*OpenConfigTelemetry) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go index 758beb1ff9eef..b98f9100ae92f 100644 --- a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go @@ -58,10 +58,7 @@ type openConfigTelemetryServer struct { telemetry.UnimplementedOpenConfigTelemetryServer } -func (s *openConfigTelemetryServer) TelemetrySubscribe( - req *telemetry.SubscriptionRequest, - stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer, -) error { +func (*openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error { path := req.PathList[0].Path switch path { case "/sensor": @@ -78,28 +75,28 @@ func (s *openConfigTelemetryServer) TelemetrySubscribe( return nil } -func (s *openConfigTelemetryServer) CancelTelemetrySubscription( - _ context.Context, - _ *telemetry.CancelSubscriptionRequest, +func (*openConfigTelemetryServer) CancelTelemetrySubscription( + context.Context, + *telemetry.CancelSubscriptionRequest, ) (*telemetry.CancelSubscriptionReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetrySubscriptions( - _ context.Context, - _ *telemetry.GetSubscriptionsRequest, +func 
(*openConfigTelemetryServer) GetTelemetrySubscriptions( + context.Context, + *telemetry.GetSubscriptionsRequest, ) (*telemetry.GetSubscriptionsReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetryOperationalState( - _ context.Context, - _ *telemetry.GetOperationalStateRequest, +func (*openConfigTelemetryServer) GetTelemetryOperationalState( + context.Context, + *telemetry.GetOperationalStateRequest, ) (*telemetry.GetOperationalStateReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetDataEncodings(_ context.Context, _ *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { +func (*openConfigTelemetryServer) GetDataEncodings(context.Context, *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { return nil, nil } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 104ba26156ff6..ac335eec0810f 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -299,7 +299,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (k *KafkaConsumer) Gather(_ telegraf.Accumulator) error { +func (*KafkaConsumer) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 568da16b6a095..94bc4096617f5 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -240,53 +240,53 @@ type FakeConsumerGroupSession struct { ctx context.Context } -func (s *FakeConsumerGroupSession) Claims() map[string][]int32 { +func (*FakeConsumerGroupSession) Claims() map[string][]int32 { panic("not implemented") } -func (s *FakeConsumerGroupSession) MemberID() string { +func (*FakeConsumerGroupSession) MemberID() string { panic("not implemented") } -func (s *FakeConsumerGroupSession) 
GenerationID() int32 { +func (*FakeConsumerGroupSession) GenerationID() int32 { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkOffset(_ string, _ int32, _ int64, _ string) { +func (*FakeConsumerGroupSession) MarkOffset(string, int32, int64, string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) ResetOffset(_ string, _ int32, _ int64, _ string) { +func (*FakeConsumerGroupSession) ResetOffset(string, int32, int64, string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkMessage(_ *sarama.ConsumerMessage, _ string) { +func (*FakeConsumerGroupSession) MarkMessage(*sarama.ConsumerMessage, string) { } func (s *FakeConsumerGroupSession) Context() context.Context { return s.ctx } -func (s *FakeConsumerGroupSession) Commit() { +func (*FakeConsumerGroupSession) Commit() { } type FakeConsumerGroupClaim struct { messages chan *sarama.ConsumerMessage } -func (c *FakeConsumerGroupClaim) Topic() string { +func (*FakeConsumerGroupClaim) Topic() string { panic("not implemented") } -func (c *FakeConsumerGroupClaim) Partition() int32 { +func (*FakeConsumerGroupClaim) Partition() int32 { panic("not implemented") } -func (c *FakeConsumerGroupClaim) InitialOffset() int64 { +func (*FakeConsumerGroupClaim) InitialOffset() int64 { panic("not implemented") } -func (c *FakeConsumerGroupClaim) HighWaterMarkOffset() int64 { +func (*FakeConsumerGroupClaim) HighWaterMarkOffset() int64 { panic("not implemented") } diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 88c18c2101c45..7ddf0d714762e 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -68,12 +68,12 @@ func (k *Kernel) Init() error { } func (k *Kernel) Gather(acc telegraf.Accumulator) error { - data, err := k.getProcValueBytes(k.statFile) + data, err := getProcValueBytes(k.statFile) if err != nil { return err } - entropyValue, err := k.getProcValueInt(k.entropyStatFile) + entropyValue, err := 
getProcValueInt(k.entropyStatFile) if err != nil { return err } @@ -137,7 +137,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { extraStats := []string{"general_profit"} for _, f := range stats { - m, err := k.getProcValueInt(filepath.Join(k.ksmStatsDir, f)) + m, err := getProcValueInt(filepath.Join(k.ksmStatsDir, f)) if err != nil { return err } @@ -146,7 +146,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { } for _, f := range extraStats { - m, err := k.getProcValueInt(filepath.Join(k.ksmStatsDir, f)) + m, err := getProcValueInt(filepath.Join(k.ksmStatsDir, f)) if err != nil { // if an extraStats metric doesn't exist in our kernel version, ignore it. continue @@ -166,7 +166,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Kernel) getProcValueBytes(path string) ([]byte, error) { +func getProcValueBytes(path string) ([]byte, error) { if _, err := os.Stat(path); os.IsNotExist(err) { return nil, fmt.Errorf("path %q does not exist", path) } else if err != nil { @@ -181,8 +181,8 @@ func (k *Kernel) getProcValueBytes(path string) ([]byte, error) { return data, nil } -func (k *Kernel) getProcValueInt(path string) (int64, error) { - data, err := k.getProcValueBytes(path) +func getProcValueInt(path string) (int64, error) { + data, err := getProcValueBytes(path) if err != nil { return -1, err } diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index da3f3aa46cf3d..23d72949d742a 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -14,23 +14,13 @@ import ( ) func TestGetProcValueInt(t *testing.T) { - k := Kernel{ - statFile: "testdata/stat_file_full", - entropyStatFile: "testdata/entropy_stat_file_full", - } - - d, err := k.getProcValueInt(k.entropyStatFile) + d, err := getProcValueInt("testdata/entropy_stat_file_full") require.NoError(t, err) require.IsType(t, int64(1), d) } func TestGetProcValueByte(t *testing.T) { - k := Kernel{ 
- statFile: "testdata/stat_file_full", - entropyStatFile: "testdata/entropy_stat_file_full", - } - - d, err := k.getProcValueBytes(k.entropyStatFile) + d, err := getProcValueBytes("testdata/entropy_stat_file_full") require.NoError(t, err) require.IsType(t, []byte("test"), d) } diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 622030728c74b..702b288ea01a1 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -101,7 +101,7 @@ func (*Kibana) SampleConfig() string { return sampleConfig } -func (k *Kibana) Start(_ telegraf.Accumulator) error { +func (*Kibana) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/kube_inventory/certificate.go b/plugins/inputs/kube_inventory/certificate.go index 5cf3603288c99..ba71f013b2f83 100644 --- a/plugins/inputs/kube_inventory/certificate.go +++ b/plugins/inputs/kube_inventory/certificate.go @@ -19,7 +19,7 @@ func collectSecrets(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete return } for _, i := range list.Items { - ki.gatherCertificates(i, acc) + gatherCertificates(i, acc) } } @@ -59,7 +59,7 @@ func getTags(cert *x509.Certificate) map[string]string { return tags } -func (ki *KubernetesInventory) gatherCertificates(r corev1.Secret, acc telegraf.Accumulator) { +func gatherCertificates(r corev1.Secret, acc telegraf.Accumulator) { now := time.Now() for resourceName, val := range r.Data { diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go index 1eb86eea13b76..742512f6824fe 100644 --- a/plugins/inputs/kube_inventory/endpoint.go +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -15,11 +15,11 @@ func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *Kuberne return } for _, i := range list.Items { - ki.gatherEndpoint(i, acc) + gatherEndpoint(i, acc) } } -func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) { +func gatherEndpoint(e 
corev1.Endpoints, acc telegraf.Accumulator) { creationTs := e.GetCreationTimestamp() if creationTs.IsZero() { return diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go index f5be722c925bc..c5a8a7509ed31 100644 --- a/plugins/inputs/kube_inventory/endpoint_test.go +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -13,8 +13,6 @@ import ( ) func TestEndpoint(t *testing.T) { - cli := &client{} - now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) @@ -256,12 +254,9 @@ func TestEndpoint(t *testing.T) { } for _, v := range tests { - ks := &KubernetesInventory{ - client: cli, - } acc := new(testutil.Accumulator) for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items { - ks.gatherEndpoint(endpoint, acc) + gatherEndpoint(endpoint, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go index f8a966bc15a46..41890e44c0479 100644 --- a/plugins/inputs/kube_inventory/ingress.go +++ b/plugins/inputs/kube_inventory/ingress.go @@ -15,11 +15,11 @@ func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete return } for _, i := range list.Items { - ki.gatherIngress(i, acc) + gatherIngress(i, acc) } } -func (ki *KubernetesInventory) gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) { +func gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) { creationTs := i.GetCreationTimestamp() if creationTs.IsZero() { return diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go index a391b3808c29b..0ba519b69bf11 100644 --- a/plugins/inputs/kube_inventory/ingress_test.go +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -13,8 +13,6 @@ import ( ) func TestIngress(t *testing.T) { - cli := &client{} - now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 
0, now.Location()) @@ -219,12 +217,9 @@ func TestIngress(t *testing.T) { } for _, v := range tests { - ks := &KubernetesInventory{ - client: cli, - } acc := new(testutil.Accumulator) for _, ingress := range ((v.handler.responseMap["/ingress/"]).(netv1.IngressList)).Items { - ks.gatherIngress(ingress, acc) + gatherIngress(ingress, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index 8aa4e979a65c3..3660c00c7ad90 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -15,14 +15,14 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI return } - ki.gatherNodeCount(len(list.Items), acc) + gatherNodeCount(len(list.Items), acc) for i := range list.Items { ki.gatherNode(&list.Items[i], acc) } } -func (ki *KubernetesInventory) gatherNodeCount(count int, acc telegraf.Accumulator) { +func gatherNodeCount(count int, acc telegraf.Accumulator) { fields := map[string]interface{}{"node_count": count} tags := make(map[string]string) diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go index 5527bca1d020e..00d9093887f7a 100644 --- a/plugins/inputs/kube_inventory/node_test.go +++ b/plugins/inputs/kube_inventory/node_test.go @@ -173,7 +173,7 @@ func TestNode(t *testing.T) { if v.name == "no nodes" { nodeCount := len((v.handler.responseMap["/nodes/"]).(corev1.NodeList).Items) - ks.gatherNodeCount(nodeCount, acc) + gatherNodeCount(nodeCount, acc) } require.Len(t, acc.Metrics, len(v.output)) testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) diff --git a/plugins/inputs/kube_inventory/persistentvolume.go b/plugins/inputs/kube_inventory/persistentvolume.go index 808db450dbcb1..6fb65e9c46874 100644 --- a/plugins/inputs/kube_inventory/persistentvolume.go +++ b/plugins/inputs/kube_inventory/persistentvolume.go @@ -16,11 +16,11 @@ func 
collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki return } for i := range list.Items { - ki.gatherPersistentVolume(&list.Items[i], acc) + gatherPersistentVolume(&list.Items[i], acc) } } -func (ki *KubernetesInventory) gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) { +func gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) { phaseType := 5 switch strings.ToLower(string(pv.Status.Phase)) { case "bound": diff --git a/plugins/inputs/kube_inventory/persistentvolume_test.go b/plugins/inputs/kube_inventory/persistentvolume_test.go index 2e3c15b4824a7..1a93f9b2b7a61 100644 --- a/plugins/inputs/kube_inventory/persistentvolume_test.go +++ b/plugins/inputs/kube_inventory/persistentvolume_test.go @@ -13,7 +13,6 @@ import ( ) func TestPersistentVolume(t *testing.T) { - cli := &client{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) @@ -77,13 +76,10 @@ func TestPersistentVolume(t *testing.T) { } for _, v := range tests { - ks := &KubernetesInventory{ - client: cli, - } acc := new(testutil.Accumulator) items := ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items for i := range items { - ks.gatherPersistentVolume(&items[i], acc) + gatherPersistentVolume(&items[i], acc) } err := acc.FirstError() From b89f127b4582f583ece2562bdab5972e7b88542b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:04:44 -0600 Subject: [PATCH 088/170] chore(deps): Bump super-linter/super-linter from 7.2.0 to 7.2.1 (#16313) --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 9fd8d494f046a..c16cbfeb88094 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -54,7 +54,7 @@ jobs: # Run Linter against code base # 
################################ - name: Lint Code Base - uses: super-linter/super-linter@v7.2.0 + uses: super-linter/super-linter@v7.2.1 env: VALIDATE_ALL_CODEBASE: false DEFAULT_BRANCH: master From a4fc9244e698244a00679d3e7b2a6369a416174a Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Tue, 17 Dec 2024 12:05:09 -0500 Subject: [PATCH 089/170] docs(parsers.openmetrics): Update link to specification (#16312) Signed-off-by: David Ashpole --- plugins/parsers/openmetrics/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/openmetrics/README.md b/plugins/parsers/openmetrics/README.md index a9328a8d96072..e582941fc31f7 100644 --- a/plugins/parsers/openmetrics/README.md +++ b/plugins/parsers/openmetrics/README.md @@ -8,7 +8,7 @@ but can also be used by e.g. The plugin allows to output different metric formats as described in the [Metric Formats section](#metric-formats). -[OpenMetrics Text Format]: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md +[OpenMetrics Text Format]: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md ## Configuration From d0a045d56bb0870a4c088a4d68cb3ca2870b1701 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:05:28 -0600 Subject: [PATCH 090/170] chore(deps): Bump github.com/fatih/color from 1.17.0 to 1.18.0 (#16317) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bd7f76df4317e..e30b292ba6e1b 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( github.com/eclipse/paho.golang v0.21.0 github.com/eclipse/paho.mqtt.golang v1.5.0 github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc - github.com/fatih/color v1.17.0 + github.com/fatih/color v1.18.0 github.com/go-ldap/ldap/v3 v3.4.8 github.com/go-logfmt/logfmt v0.6.0 github.com/go-ole/go-ole v1.3.0 diff --git a/go.sum b/go.sum index 
628bb6e28e1ee..2951a27105d8a 100644 --- a/go.sum +++ b/go.sum @@ -1194,8 +1194,8 @@ github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y= From 686cda839ba8374bf1a33922f338486cc57fe2d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:05:53 -0600 Subject: [PATCH 091/170] chore(deps): Bump github.com/IBM/nzgo/v12 from 12.0.9-0.20231115043259-49c27f2dfe48 to 12.0.9 (#16319) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e30b292ba6e1b..e47fcf260b100 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/BurntSushi/toml v1.4.0 github.com/ClickHouse/clickhouse-go v1.5.4 github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 + github.com/IBM/nzgo/v12 v12.0.9 github.com/IBM/sarama v1.43.3 github.com/Masterminds/semver/v3 v3.3.0 github.com/Masterminds/sprig v2.22.0+incompatible diff --git a/go.sum b/go.sum index 2951a27105d8a..c3d14f7137813 100644 --- a/go.sum +++ b/go.sum @@ -735,8 +735,8 @@ 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 h1:TBb4IxmBH0ssmWTUg0C6c9ZnfDmZospTF8f+YbHnbbA= -github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48/go.mod h1:4pvfEkfsrAdqlljsp8HNwv/uzNKy2fzoXBB1aRIssJg= +github.com/IBM/nzgo/v12 v12.0.9 h1:SwzYFU5ooXsTZsQhU6OsbUhs/fQyLvCtlJYSEZ58mN0= +github.com/IBM/nzgo/v12 v12.0.9/go.mod h1:4pvfEkfsrAdqlljsp8HNwv/uzNKy2fzoXBB1aRIssJg= github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= From 7360f5dc5a18f1ea49b41a6638f7527f8b15092e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:08:00 -0600 Subject: [PATCH 092/170] chore(deps): Bump github.com/prometheus/common from 0.60.0 to 0.61.0 (#16318) --- go.mod | 10 +++++----- go.sum | 19 ++++++++++--------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index e47fcf260b100..584286c7008ff 100644 --- a/go.mod +++ b/go.mod @@ -164,7 +164,7 @@ require ( github.com/prometheus-community/pro-bing v0.4.1 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.60.0 + github.com/prometheus/common v0.61.0 github.com/prometheus/procfs v0.15.1 github.com/prometheus/prometheus v0.54.1 github.com/rabbitmq/amqp091-go v1.10.0 @@ -186,7 +186,7 @@ require ( 
github.com/snowflakedb/gosnowflake v1.11.2 github.com/srebhan/cborquery v1.0.1 github.com/srebhan/protobufquery v1.0.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/testcontainers/testcontainers-go v0.34.0 github.com/testcontainers/testcontainers-go/modules/kafka v0.34.0 @@ -213,8 +213,8 @@ require ( go.step.sm/crypto v0.54.0 golang.org/x/crypto v0.31.0 golang.org/x/mod v0.21.0 - golang.org/x/net v0.31.0 - golang.org/x/oauth2 v0.23.0 + golang.org/x/net v0.32.0 + golang.org/x/oauth2 v0.24.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.28.0 golang.org/x/term v0.27.0 @@ -224,7 +224,7 @@ require ( google.golang.org/api v0.203.0 google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 google.golang.org/grpc v1.68.0 - google.golang.org/protobuf v1.35.1 + google.golang.org/protobuf v1.35.2 gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/olivere/elastic.v5 v5.0.86 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 diff --git a/go.sum b/go.sum index c3d14f7137813..c11b686ddd6af 100644 --- a/go.sum +++ b/go.sum @@ -2136,8 +2136,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= -github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -2313,8 +2313,9 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA= github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA= @@ -2704,8 +2705,8 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2735,8 +2736,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3318,8 +3319,8 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 3b87986f42535e68e73767fa9f4b4ac0c7b8b744 Mon Sep 17 00:00:00 2001 From: Dmitry Khamitov Date: Tue, 17 Dec 2024 17:09:13 +0000 Subject: [PATCH 093/170] fix(inputs.mongodb): Do not dereference nil pointer if gathering database stats fails (#16310) --- plugins/inputs/mongodb/mongodb_server.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index b0ea0bb35ad29..c8369c68bb89b 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -327,7 +327,8 @@ func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gathe for _, name := range names { db, err := s.gatherDBStats(name) if err != nil { - s.log.Debugf("Error getting db stats from %q: %s", name, err.Error()) + s.log.Errorf("Error getting db stats from %q: %v", name, err) + continue } dbStats.Dbs = append(dbStats.Dbs, *db) } From e2b5a9910b9804ff517543f9a0a40e3b71a35f34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 17 Dec 2024 18:10:18 +0100 Subject: [PATCH 094/170] chore: Fix linter findings for `revive:exported` in `plugins/inputs/p*` (#16307) --- plugins/inputs/p4runtime/p4runtime_test.go | 18 +- plugins/inputs/passenger/passenger.go | 72 +++--- plugins/inputs/passenger/passenger_test.go | 8 +- plugins/inputs/pf/pf.go | 158 ++++++------ plugins/inputs/pgbouncer/pgbouncer.go | 18 +- plugins/inputs/phpfpm/child.go | 41 +--- plugins/inputs/phpfpm/fcgi_client.go | 2 +- plugins/inputs/phpfpm/fcgi_test.go | 4 +- plugins/inputs/phpfpm/phpfpm.go | 95 ++++--- plugins/inputs/phpfpm/phpfpm_test.go | 26 +- 
plugins/inputs/ping/ping.go | 144 +++++------ plugins/inputs/ping/ping_windows_test.go | 8 +- plugins/inputs/postfix/postfix.go | 60 ++--- plugins/inputs/postfix/postfix_windows.go | 6 +- plugins/inputs/postgresql/postgresql.go | 12 +- .../postgresql_extensible.go | 18 +- plugins/inputs/powerdns/powerdns.go | 9 +- .../powerdns_recursor/powerdns_recursor.go | 4 +- .../inputs/processes/processes_notwindows.go | 10 +- plugins/inputs/procstat/filter.go | 30 +-- plugins/inputs/procstat/native_finder.go | 52 ++-- plugins/inputs/procstat/native_finder_test.go | 20 +- plugins/inputs/procstat/os_linux.go | 28 +-- plugins/inputs/procstat/os_others.go | 16 +- plugins/inputs/procstat/os_windows.go | 20 +- plugins/inputs/procstat/pgrep.go | 30 +-- plugins/inputs/procstat/process.go | 68 ++--- plugins/inputs/procstat/procstat.go | 130 +++++----- plugins/inputs/procstat/procstat_test.go | 100 ++++---- plugins/inputs/procstat/service_finders.go | 15 +- plugins/inputs/prometheus/consul.go | 22 +- plugins/inputs/prometheus/kubernetes.go | 38 +-- plugins/inputs/prometheus/kubernetes_test.go | 48 ++-- plugins/inputs/prometheus/prometheus.go | 232 +++++++++--------- plugins/inputs/prometheus/prometheus_test.go | 2 +- plugins/inputs/proxmox/proxmox.go | 36 +-- plugins/inputs/proxmox/structs.go | 20 +- plugins/inputs/puppetagent/puppetagent.go | 9 +- 38 files changed, 777 insertions(+), 852 deletions(-) diff --git a/plugins/inputs/p4runtime/p4runtime_test.go b/plugins/inputs/p4runtime/p4runtime_test.go index 58dbb8336ceaa..2972963fc0fed 100644 --- a/plugins/inputs/p4runtime/p4runtime_test.go +++ b/plugins/inputs/p4runtime/p4runtime_test.go @@ -43,7 +43,7 @@ func createEntityCounterEntry( } } -func NewTestP4RuntimeClient( +func newTestP4RuntimeClient( p4RuntimeClient *fakeP4RuntimeClient, addr string, t *testing.T, @@ -102,7 +102,7 @@ func TestErrorGetP4Info(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := 
NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.Error(t, plugin.Gather(&acc)) @@ -245,7 +245,7 @@ func TestOneCounterRead(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -333,7 +333,7 @@ func TestMultipleEntitiesSingleCounterRead(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -425,7 +425,7 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -457,7 +457,7 @@ func TestNoCountersAvailable(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -484,7 +484,7 @@ func TestFilterCounters(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) plugin.CounterNamesInclude = 
[]string{"oof"} @@ -534,7 +534,7 @@ func TestFailReadCounterEntryFromEntry(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -577,7 +577,7 @@ func TestFailReadAllEntries(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index 7123cc70b012d..0175a5000ced5 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -19,22 +19,8 @@ import ( //go:embed sample.conf var sampleConfig string -type passenger struct { - Command string -} - -func (p *passenger) parseCommand() (string, []string) { - var arguments []string - if !strings.Contains(p.Command, " ") { - return p.Command, arguments - } - - arguments = strings.Split(p.Command, " ") - if len(arguments) == 1 { - return arguments[0], arguments[1:] - } - - return arguments[0], arguments[1:] +type Passenger struct { + Command string `toml:"command"` } type info struct { @@ -91,6 +77,39 @@ type process struct { ProcessGroupID string `xml:"process_group_id"` } +func (*Passenger) SampleConfig() string { + return sampleConfig +} + +func (p *Passenger) Gather(acc telegraf.Accumulator) error { + if p.Command == "" { + p.Command = "passenger-status -v --show=xml" + } + + cmd, args := p.parseCommand() + out, err := exec.Command(cmd, args...).Output() + + if err != nil { + return err + } + + return importMetric(out, acc) +} + +func (p *Passenger) parseCommand() (string, []string) { + var 
arguments []string + if !strings.Contains(p.Command, " ") { + return p.Command, arguments + } + + arguments = strings.Split(p.Command, " ") + if len(arguments) == 1 { + return arguments[0], arguments[1:] + } + + return arguments[0], arguments[1:] +} + func (p *process) getUptime() int64 { if p.Uptime == "" { return 0 @@ -131,25 +150,6 @@ func (p *process) getUptime() int64 { return uptime } -func (*passenger) SampleConfig() string { - return sampleConfig -} - -func (p *passenger) Gather(acc telegraf.Accumulator) error { - if p.Command == "" { - p.Command = "passenger-status -v --show=xml" - } - - cmd, args := p.parseCommand() - out, err := exec.Command(cmd, args...).Output() - - if err != nil { - return err - } - - return importMetric(out, acc) -} - func importMetric(stat []byte, acc telegraf.Accumulator) error { var p info @@ -231,6 +231,6 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { func init() { inputs.Add("passenger", func() telegraf.Input { - return &passenger{} + return &Passenger{} }) } diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index 6c53578d7e636..49411d04919d5 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -39,7 +39,7 @@ func teardown(tempFilePath string) { } func Test_Invalid_Passenger_Status_Cli(t *testing.T) { - r := &passenger{ + r := &Passenger{ Command: "an-invalid-command passenger-status", } @@ -55,7 +55,7 @@ func Test_Invalid_Xml(t *testing.T) { require.NoError(t, err) defer teardown(tempFilePath) - r := &passenger{ + r := &Passenger{ Command: tempFilePath, } @@ -72,7 +72,7 @@ func Test_Default_Config_Load_Default_Command(t *testing.T) { require.NoError(t, err) defer teardown(tempFilePath) - r := &passenger{} + r := &Passenger{} var acc testutil.Accumulator @@ -87,7 +87,7 @@ func TestPassengerGenerateMetric(t *testing.T) { defer teardown(tempFilePath) // Now we tested again above server, with our 
authentication data - r := &passenger{ + r := &Passenger{ Command: tempFilePath, } diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index 204c30a5dbc96..20709aaf750d9 100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -18,26 +18,81 @@ import ( //go:embed sample.conf var sampleConfig string -const measurement = "pf" -const pfctlCommand = "pfctl" +var ( + errParseHeader = fmt.Errorf("cannot find header in %s output", pfctlCommand) + anyTableHeaderRE = regexp.MustCompile("^[A-Z]") + stateTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) + counterTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) + execLookPath = exec.LookPath + execCommand = exec.Command + pfctlOutputStanzas = []*pfctlOutputStanza{ + { + headerRE: regexp.MustCompile("^State Table"), + parseFunc: parseStateTable, + }, + { + headerRE: regexp.MustCompile("^Counters"), + parseFunc: parseCounterTable, + }, + } + stateTable = []*entry{ + {"entries", "current entries", -1}, + {"searches", "searches", -1}, + {"inserts", "inserts", -1}, + {"removals", "removals", -1}, + } + counterTable = []*entry{ + {"match", "match", -1}, + {"bad-offset", "bad-offset", -1}, + {"fragment", "fragment", -1}, + {"short", "short", -1}, + {"normalize", "normalize", -1}, + {"memory", "memory", -1}, + {"bad-timestamp", "bad-timestamp", -1}, + {"congestion", "congestion", -1}, + {"ip-option", "ip-option", -1}, + {"proto-cksum", "proto-cksum", -1}, + {"state-mismatch", "state-mismatch", -1}, + {"state-insert", "state-insert", -1}, + {"state-limit", "state-limit", -1}, + {"src-limit", "src-limit", -1}, + {"synproxy", "synproxy", -1}, + } +) + +const ( + measurement = "pf" + pfctlCommand = "pfctl" +) type PF struct { - PfctlCommand string - PfctlArgs []string - UseSudo bool - StateTable []*Entry + UseSudo bool `toml:"use_sudo"` + + pfctlCommand string + pfctlArgs []string infoFunc func() (string, error) } +type pfctlOutputStanza struct { + headerRE *regexp.Regexp + parseFunc func([]string, 
map[string]interface{}) error + found bool +} + +type entry struct { + field string + pfctlTitle string + value int64 +} + func (*PF) SampleConfig() string { return sampleConfig } -// Gather is the entrypoint for the plugin. func (pf *PF) Gather(acc telegraf.Accumulator) error { - if pf.PfctlCommand == "" { + if pf.pfctlCommand == "" { var err error - if pf.PfctlCommand, pf.PfctlArgs, err = pf.buildPfctlCmd(); err != nil { + if pf.pfctlCommand, pf.pfctlArgs, err = pf.buildPfctlCmd(); err != nil { acc.AddError(fmt.Errorf("can't construct pfctl commandline: %w", err)) return nil } @@ -55,38 +110,17 @@ func (pf *PF) Gather(acc telegraf.Accumulator) error { return nil } -var errParseHeader = fmt.Errorf("cannot find header in %s output", pfctlCommand) - func errMissingData(tag string) error { return fmt.Errorf("struct data for tag %q not found in %s output", tag, pfctlCommand) } -type pfctlOutputStanza struct { - HeaderRE *regexp.Regexp - ParseFunc func([]string, map[string]interface{}) error - Found bool -} - -var pfctlOutputStanzas = []*pfctlOutputStanza{ - { - HeaderRE: regexp.MustCompile("^State Table"), - ParseFunc: parseStateTable, - }, - { - HeaderRE: regexp.MustCompile("^Counters"), - ParseFunc: parseCounterTable, - }, -} - -var anyTableHeaderRE = regexp.MustCompile("^[A-Z]") - func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error { fields := make(map[string]interface{}) scanner := bufio.NewScanner(strings.NewReader(pfoutput)) for scanner.Scan() { line := scanner.Text() for _, s := range pfctlOutputStanzas { - if s.HeaderRE.MatchString(line) { + if s.headerRE.MatchString(line) { var stanzaLines []string scanner.Scan() line = scanner.Text() @@ -98,15 +132,15 @@ func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error } line = scanner.Text() } - if perr := s.ParseFunc(stanzaLines, fields); perr != nil { + if perr := s.parseFunc(stanzaLines, fields); perr != nil { return perr } - s.Found = true + s.found = true } } } 
for _, s := range pfctlOutputStanzas { - if !s.Found { + if !s.found { return errParseHeader } } @@ -115,57 +149,22 @@ func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error return nil } -type Entry struct { - Field string - PfctlTitle string - Value int64 -} - -var StateTable = []*Entry{ - {"entries", "current entries", -1}, - {"searches", "searches", -1}, - {"inserts", "inserts", -1}, - {"removals", "removals", -1}, -} - -var stateTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) - func parseStateTable(lines []string, fields map[string]interface{}) error { - return storeFieldValues(lines, stateTableRE, fields, StateTable) + return storeFieldValues(lines, stateTableRE, fields, stateTable) } -var CounterTable = []*Entry{ - {"match", "match", -1}, - {"bad-offset", "bad-offset", -1}, - {"fragment", "fragment", -1}, - {"short", "short", -1}, - {"normalize", "normalize", -1}, - {"memory", "memory", -1}, - {"bad-timestamp", "bad-timestamp", -1}, - {"congestion", "congestion", -1}, - {"ip-option", "ip-option", -1}, - {"proto-cksum", "proto-cksum", -1}, - {"state-mismatch", "state-mismatch", -1}, - {"state-insert", "state-insert", -1}, - {"state-limit", "state-limit", -1}, - {"src-limit", "src-limit", -1}, - {"synproxy", "synproxy", -1}, -} - -var counterTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) - func parseCounterTable(lines []string, fields map[string]interface{}) error { - return storeFieldValues(lines, counterTableRE, fields, CounterTable) + return storeFieldValues(lines, counterTableRE, fields, counterTable) } -func storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]interface{}, entryTable []*Entry) error { +func storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]interface{}, entryTable []*entry) error { for _, v := range lines { entries := regex.FindStringSubmatch(v) if entries != nil { for _, f := range entryTable { - if f.PfctlTitle == entries[1] { + if f.pfctlTitle == entries[1] { var err 
error - if f.Value, err = strconv.ParseInt(entries[2], 10, 64); err != nil { + if f.value, err = strconv.ParseInt(entries[2], 10, 64); err != nil { return err } } @@ -174,17 +173,17 @@ func storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]in } for _, v := range entryTable { - if v.Value == -1 { - return errMissingData(v.PfctlTitle) + if v.value == -1 { + return errMissingData(v.pfctlTitle) } - fields[v.Field] = v.Value + fields[v.field] = v.value } return nil } func (pf *PF) callPfctl() (string, error) { - cmd := execCommand(pf.PfctlCommand, pf.PfctlArgs...) + cmd := execCommand(pf.pfctlCommand, pf.pfctlArgs...) out, oerr := cmd.Output() if oerr != nil { var ee *exec.ExitError @@ -196,9 +195,6 @@ func (pf *PF) callPfctl() (string, error) { return string(out), oerr } -var execLookPath = exec.LookPath -var execCommand = exec.Command - func (pf *PF) buildPfctlCmd() (string, []string, error) { cmd, err := execLookPath(pfctlCommand) if err != nil { diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 4d079e1731f0a..2c6ccf43bc4bd 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -16,6 +16,11 @@ import ( //go:embed sample.conf var sampleConfig string +var ignoredColumns = map[string]bool{"user": true, "database": true, "pool_mode": true, + "avg_req": true, "avg_recv": true, "avg_sent": true, "avg_query": true, + "force_user": true, "host": true, "port": true, "name": true, +} + type PgBouncer struct { ShowCommands []string `toml:"show_commands"` postgresql.Config @@ -23,11 +28,6 @@ type PgBouncer struct { service *postgresql.Service } -var ignoredColumns = map[string]bool{"user": true, "database": true, "pool_mode": true, - "avg_req": true, "avg_recv": true, "avg_sent": true, "avg_query": true, - "force_user": true, "host": true, "port": true, "name": true, -} - func (*PgBouncer) SampleConfig() string { return sampleConfig } @@ -58,10 +58,6 @@ func (p 
*PgBouncer) Start(_ telegraf.Accumulator) error { return p.service.Start() } -func (p *PgBouncer) Stop() { - p.service.Stop() -} - func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { for _, cmd := range p.ShowCommands { switch cmd { @@ -87,6 +83,10 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { return nil } +func (p *PgBouncer) Stop() { + p.service.Stop() +} + func (p *PgBouncer) accRow(row *sql.Rows, columns []string) (map[string]string, map[string]*interface{}, error) { var dbname bytes.Buffer diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 3448db40be4a9..f921dc4bf13d2 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -10,10 +10,8 @@ import ( "errors" "fmt" "io" - "net" "net/http" "net/http/cgi" - "os" "strings" "sync" "time" @@ -164,13 +162,13 @@ var errCloseConn = errors.New("fcgi: connection should be closed") var emptyBody = io.NopCloser(strings.NewReader("")) -// ErrRequestAborted is returned by Read when a handler attempts to read the +// errRequestAborted is returned by Read when a handler attempts to read the // body of a request that has been aborted by the web server. -var ErrRequestAborted = errors.New("fcgi: request aborted by web server") +var errRequestAborted = errors.New("fcgi: request aborted by web server") -// ErrConnClosed is returned by Read when a handler attempts to read the body of +// errConnClosed is returned by Read when a handler attempts to read the body of // a request after the connection to the web server has been closed. 
-var ErrConnClosed = errors.New("fcgi: connection to web server closed") +var errConnClosed = errors.New("fcgi: connection to web server closed") func (c *child) handleRecord(rec *record) error { c.mu.Lock() @@ -249,7 +247,7 @@ func (c *child) handleRecord(rec *record) error { return err } if req.pw != nil { - req.pw.CloseWithError(ErrRequestAborted) + req.pw.CloseWithError(errRequestAborted) } if !req.keepConn { // connection will close upon return @@ -306,34 +304,7 @@ func (c *child) cleanUp() { if req.pw != nil { // race with call to Close in c.serveRequest doesn't matter because // Pipe(Reader|Writer).Close are idempotent - req.pw.CloseWithError(ErrConnClosed) + req.pw.CloseWithError(errConnClosed) } } } - -// Serve accepts incoming FastCGI connections on the listener l, creating a new -// goroutine for each. The goroutine reads requests and then calls handler -// to reply to them. -// If l is nil, Serve accepts connections from os.Stdin. -// If handler is nil, http.DefaultServeMux is used. 
-func Serve(l net.Listener, handler http.Handler) error { - if l == nil { - var err error - l, err = net.FileListener(os.Stdin) - if err != nil { - return err - } - defer l.Close() - } - if handler == nil { - handler = http.DefaultServeMux - } - for { - rw, err := l.Accept() - if err != nil { - return err - } - c := newChild(rw, handler) - go c.serve() - } -} diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index f33b68d0af9a5..e982471b3d0e6 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -44,7 +44,7 @@ func newFcgiClient(timeout time.Duration, h string, args ...interface{}) (*conn, return &conn{rwc: con}, nil } -func (c *conn) Request(env map[string]string, requestData string) (retout, reterr []byte, err error) { +func (c *conn) request(env map[string]string, requestData string) (retout, reterr []byte, err error) { defer c.rwc.Close() var reqID uint16 = 1 diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index f96c22b6fec90..d039685bb05f8 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -206,7 +206,7 @@ var cleanUpTests = []struct { makeRecord(typeAbortRequest, nil), }, nil), - ErrRequestAborted, + errRequestAborted, }, // confirm that child.serve closes all pipes after error reading record { @@ -215,7 +215,7 @@ var cleanUpTests = []struct { nil, }, nil), - ErrConnClosed, + errConnClosed, }, } diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index e1b3ce515fd30..9b3c5dc2704c4 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -26,22 +26,31 @@ import ( var sampleConfig string const ( - PfPool = "pool" - PfProcessManager = "process manager" - PfStartSince = "start since" - PfAcceptedConn = "accepted conn" - PfListenQueue = "listen queue" - PfMaxListenQueue = "max listen queue" - PfListenQueueLen = "listen queue len" - PfIdleProcesses = 
"idle processes" - PfActiveProcesses = "active processes" - PfTotalProcesses = "total processes" - PfMaxActiveProcesses = "max active processes" - PfMaxChildrenReached = "max children reached" - PfSlowRequests = "slow requests" + pfPool = "pool" + pfStartSince = "start since" + pfAcceptedConn = "accepted conn" + pfListenQueue = "listen queue" + pfMaxListenQueue = "max listen queue" + pfListenQueueLen = "listen queue len" + pfIdleProcesses = "idle processes" + pfActiveProcesses = "active processes" + pfTotalProcesses = "total processes" + pfMaxActiveProcesses = "max active processes" + pfMaxChildrenReached = "max children reached" + pfSlowRequests = "slow requests" ) -type JSONMetrics struct { +type Phpfpm struct { + Format string `toml:"format"` + Timeout config.Duration `toml:"timeout"` + Urls []string `toml:"urls"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig + + client *http.Client +} + +type jsonMetrics struct { Pool string `json:"pool"` ProcessManager string `json:"process manager"` StartTime int `json:"start time"` @@ -76,21 +85,11 @@ type JSONMetrics struct { type metricStat map[string]int64 type poolStat map[string]metricStat -type phpfpm struct { - Format string `toml:"format"` - Timeout config.Duration `toml:"timeout"` - Urls []string `toml:"urls"` - Log telegraf.Logger `toml:"-"` - tls.ClientConfig - - client *http.Client -} - -func (*phpfpm) SampleConfig() string { +func (*Phpfpm) SampleConfig() string { return sampleConfig } -func (p *phpfpm) Init() error { +func (p *Phpfpm) Init() error { if len(p.Urls) == 0 { p.Urls = []string{"http://127.0.0.1/status"} } @@ -118,9 +117,7 @@ func (p *phpfpm) Init() error { return nil } -// Reads stats from all configured servers accumulates stats. -// Returns one of the errors encountered while gather stats (if any). 
-func (p *phpfpm) Gather(acc telegraf.Accumulator) error { +func (p *Phpfpm) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, serv := range expandUrls(acc, p.Urls) { wg.Add(1) @@ -136,7 +133,7 @@ func (p *phpfpm) Gather(acc telegraf.Accumulator) error { } // Request status page to get stat raw data and import it -func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { +func (p *Phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { return p.gatherHTTP(addr, acc) } @@ -187,8 +184,8 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { } // Gather stat using fcgi protocol -func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator, addr string) error { - fpmOutput, fpmErr, err := fcgi.Request(map[string]string{ +func (p *Phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator, addr string) error { + fpmOutput, fpmErr, err := fcgi.request(map[string]string{ "SCRIPT_NAME": "/" + statusPath, "SCRIPT_FILENAME": statusPath, "REQUEST_METHOD": "GET", @@ -206,7 +203,7 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula } // Gather stat using http protocol -func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { +func (p *Phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { u, err := url.Parse(addr) if err != nil { return fmt.Errorf("unable parse server address %q: %w", addr, err) @@ -232,7 +229,7 @@ func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { } // Import stat data into Telegraf system -func (p *phpfpm) importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { +func (p *Phpfpm) importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { if p.Format == "json" { p.parseJSON(r, acc, addr) } else { @@ -254,7 +251,7 @@ func parseLines(r io.Reader, acc 
telegraf.Accumulator, addr string) { } fieldName := strings.Trim(keyvalue[0], " ") // We start to gather data for a new pool here - if fieldName == PfPool { + if fieldName == pfPool { currentPool = strings.Trim(keyvalue[1], " ") stats[currentPool] = make(metricStat) continue @@ -262,17 +259,17 @@ func parseLines(r io.Reader, acc telegraf.Accumulator, addr string) { // Start to parse metric for current pool switch fieldName { - case PfStartSince, - PfAcceptedConn, - PfListenQueue, - PfMaxListenQueue, - PfListenQueueLen, - PfIdleProcesses, - PfActiveProcesses, - PfTotalProcesses, - PfMaxActiveProcesses, - PfMaxChildrenReached, - PfSlowRequests: + case pfStartSince, + pfAcceptedConn, + pfListenQueue, + pfMaxListenQueue, + pfListenQueueLen, + pfIdleProcesses, + pfActiveProcesses, + pfTotalProcesses, + pfMaxActiveProcesses, + pfMaxChildrenReached, + pfSlowRequests: fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64) if err == nil { stats[currentPool][fieldName] = fieldValue @@ -294,8 +291,8 @@ func parseLines(r io.Reader, acc telegraf.Accumulator, addr string) { } } -func (p *phpfpm) parseJSON(r io.Reader, acc telegraf.Accumulator, addr string) { - var metrics JSONMetrics +func (p *Phpfpm) parseJSON(r io.Reader, acc telegraf.Accumulator, addr string) { + var metrics jsonMetrics if err := json.NewDecoder(r).Decode(&metrics); err != nil { p.Log.Errorf("Unable to decode JSON response: %s", err) return @@ -402,6 +399,6 @@ func isNetworkURL(addr string) bool { func init() { inputs.Add("phpfpm", func() telegraf.Input { - return &phpfpm{} + return &Phpfpm{} }) } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 92b3affa7ad08..802c761532ccc 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -56,7 +56,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { defer ts.Close() url := ts.URL + "?test=ok" - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{url}, Log: 
&testutil.Logger{}, } @@ -106,7 +106,7 @@ func TestPhpFpmGeneratesJSONMetrics_From_Http(t *testing.T) { expected, err := testutil.ParseMetricsFromFile("testdata/expected.out", parser) require.NoError(t, err) - input := &phpfpm{ + input := &Phpfpm{ Urls: []string{server.URL + "?full&json"}, Format: "json", Log: &testutil.Logger{}, @@ -128,7 +128,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway // Now we tested again above server - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, Log: &testutil.Logger{}, } @@ -179,7 +179,7 @@ func TestPhpFpmTimeout_From_Fcgi(t *testing.T) { }() // Now we tested again above server - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, Timeout: config.Duration(timeout), Log: &testutil.Logger{}, @@ -211,7 +211,7 @@ func TestPhpFpmCrashWithTimeout_From_Fcgi(t *testing.T) { const timeout = 200 * time.Millisecond // Now we tested again above server - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"fcgi://" + tcpAddress + "/status"}, Timeout: config.Duration(timeout), Log: &testutil.Logger{}, @@ -237,7 +237,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { s := statServer{} go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{tcp.Addr().String()}, Log: &testutil.Logger{}, } @@ -289,7 +289,7 @@ func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) { go fcgi.Serve(tcp1, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway go fcgi.Serve(tcp2, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"}, Log: &testutil.Logger{}, } @@ -340,7 +340,7 @@ func 
TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { s := statServer{} go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{tcp.Addr().String() + ":custom-status-path"}, Log: &testutil.Logger{}, } @@ -374,7 +374,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { // When not passing server config, we default to localhost // We just want to make sure we did request stat from localhost func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"http://bad.localhost:62001/status"}, Log: &testutil.Logger{}, } @@ -389,7 +389,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t t.Skip("Skipping long test in short mode") } - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"http://aninvalidone"}, Log: &testutil.Logger{}, } @@ -402,7 +402,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"/tmp/invalid.sock"}, Log: &testutil.Logger{}, } @@ -435,7 +435,7 @@ var outputSampleJSON []byte func TestPhpFpmParseJSON_Log_Error_Without_Panic_When_When_JSON_Is_Invalid(t *testing.T) { // Capture the logging output for checking logger := &testutil.CaptureLogger{Name: "inputs.phpfpm"} - plugin := &phpfpm{Log: logger} + plugin := &Phpfpm{Log: logger} require.NoError(t, plugin.Init()) // parse valid JSON without panic and without log output @@ -459,7 +459,7 @@ func TestGatherDespiteUnavailable(t *testing.T) { go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway // Now we tested again above server - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"fcgi://" + tcp.Addr().String() + "/status", "/lala"}, Log: &testutil.Logger{}, } diff --git 
a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 9f2e692f1cf70..8538d394bc809 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -28,71 +28,69 @@ const ( defaultPingDataBytesSize = 56 ) -// HostPinger is a function that runs the "ping" function using a list of -// passed arguments. This can be easily switched with a mocked ping function -// for unit test purposes (see ping_test.go) -type HostPinger func(binary string, timeout float64, args ...string) (string, error) - type Ping struct { - // wg is used to wait for ping with multiple URLs - wg sync.WaitGroup - - // Pre-calculated interval and timeout - calcInterval time.Duration - calcTimeout time.Duration - - sourceAddress string - - Log telegraf.Logger `toml:"-"` - - // Interval at which to ping (ping -i ) - PingInterval float64 `toml:"ping_interval"` - - // Number of pings to send (ping -c ) - Count int - - // Per-ping timeout, in seconds. 0 means no timeout (ping -W ) - Timeout float64 - - // Ping deadline, in seconds. 0 means no deadline. (ping -w ) - Deadline int - - // Interface or source address to send ping from (ping -I/-S ) - Interface string - - // URLs to ping - Urls []string - - // Method defines how to ping (native or exec) - Method string + Urls []string `toml:"urls"` // URLs to ping + Method string `toml:"method"` // Method defines how to ping (native or exec) + Count int `toml:"count"` // Number of pings to send (ping -c ) + PingInterval float64 `toml:"ping_interval"` // Interval at which to ping (ping -i ) + Timeout float64 `toml:"timeout"` // Per-ping timeout, in seconds. 0 means no timeout (ping -W ) + Deadline int `toml:"deadline"` // Ping deadline, in seconds. 0 means no deadline. 
(ping -w ) + Interface string `toml:"interface"` // Interface or source address to send ping from (ping -I/-S ) + Percentiles []int `toml:"percentiles"` // Calculate the given percentiles when using native method + Binary string `toml:"binary"` // Ping executable binary + // Arguments for ping command. When arguments are not empty, system binary will be used and other options (ping_interval, timeout, etc.) will be ignored + Arguments []string `toml:"arguments"` + IPv4 bool `toml:"ipv4"` // Whether to resolve addresses using ipv4 or not. + IPv6 bool `toml:"ipv6"` // Whether to resolve addresses using ipv6 or not. + Size *int `toml:"size"` // Packet size + Log telegraf.Logger `toml:"-"` + + wg sync.WaitGroup // wg is used to wait for ping with multiple URLs + calcInterval time.Duration // Pre-calculated interval and timeout + calcTimeout time.Duration + sourceAddress string + pingHost hostPingerFunc // host ping function + nativePingFunc nativePingFunc +} - // Ping executable binary - Binary string +// hostPingerFunc is a function that runs the "ping" function using a list of +// passed arguments. This can be easily switched with a mocked ping function +// for unit test purposes (see ping_test.go) +type hostPingerFunc func(binary string, timeout float64, args ...string) (string, error) - // Arguments for ping command. When arguments is not empty, system binary will be used and - // other options (ping_interval, timeout, etc.) will be ignored - Arguments []string +type nativePingFunc func(destination string) (*pingStats, error) - // Whether to resolve addresses using ipv4 or not. - IPv4 bool +type durationSlice []time.Duration - // Whether to resolve addresses using ipv6 or not. 
- IPv6 bool +type pingStats struct { + ping.Statistics + ttl int +} - // host ping function - pingHost HostPinger +func (*Ping) SampleConfig() string { + return sampleConfig +} - nativePingFunc NativePingFunc +func (p *Ping) Init() error { + if p.Count < 1 { + return errors.New("bad number of packets to transmit") + } - // Calculate the given percentiles when using native method - Percentiles []int + // The interval cannot be below 0.2 seconds, matching ping implementation: https://linux.die.net/man/8/ping + if p.PingInterval < 0.2 { + p.calcInterval = time.Duration(.2 * float64(time.Second)) + } else { + p.calcInterval = time.Duration(p.PingInterval * float64(time.Second)) + } - // Packet size - Size *int -} + // If no timeout is given default to 5 seconds, matching original implementation + if p.Timeout == 0 { + p.calcTimeout = time.Duration(5) * time.Second + } else { + p.calcTimeout = time.Duration(p.Timeout) * time.Second + } -func (*Ping) SampleConfig() string { - return sampleConfig + return nil } func (p *Ping) Gather(acc telegraf.Accumulator) error { @@ -115,13 +113,6 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { return nil } -type pingStats struct { - ping.Statistics - ttl int -} - -type NativePingFunc func(destination string) (*pingStats, error) - func (p *Ping) nativePing(destination string) (*pingStats, error) { ps := &pingStats{} @@ -259,11 +250,11 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { acc.AddFields("ping", fields, tags) } -type durationSlice []time.Duration +func (p durationSlice) Len() int { return len(p) } -func (p durationSlice) Len() int { return len(p) } func (p durationSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p durationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p durationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // R7 from Hyndman and Fan (1996), which matches Excel func percentile(values durationSlice, perc int) time.Duration { @@ -292,29 +283,6 @@ 
func percentile(values durationSlice, perc int) time.Duration { return lower + time.Duration(rankFraction*float64(upper-lower)) } -// Init ensures the plugin is configured correctly. -func (p *Ping) Init() error { - if p.Count < 1 { - return errors.New("bad number of packets to transmit") - } - - // The interval cannot be below 0.2 seconds, matching ping implementation: https://linux.die.net/man/8/ping - if p.PingInterval < 0.2 { - p.calcInterval = time.Duration(.2 * float64(time.Second)) - } else { - p.calcInterval = time.Duration(p.PingInterval * float64(time.Second)) - } - - // If no timeout is given default to 5 seconds, matching original implementation - if p.Timeout == 0 { - p.calcTimeout = time.Duration(5) * time.Second - } else { - p.calcTimeout = time.Duration(p.Timeout) * time.Second - } - - return nil -} - func hostPinger(binary string, timeout float64, args ...string) (string, error) { bin, err := exec.LookPath(binary) if err != nil { diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 4517bf8f33736..93b2bd04ff99a 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -261,7 +261,7 @@ func TestFatalPingGather(t *testing.T) { "Fatal ping should not have packet measurements") } -var UnreachablePingOutput = ` +var unreachablePingOutput = ` Pinging www.google.pl [8.8.8.8] with 32 bytes of data: Request timed out. Request timed out. @@ -273,7 +273,7 @@ Ping statistics for 8.8.8.8: ` func mockUnreachableHostPinger(string, float64, ...string) (string, error) { - return UnreachablePingOutput, errors.New("so very bad") + return unreachablePingOutput, errors.New("so very bad") } // Reply from 185.28.251.217: TTL expired in transit. 
@@ -312,7 +312,7 @@ func TestUnreachablePingGather(t *testing.T) { "Fatal ping should not have packet measurements") } -var TTLExpiredPingOutput = ` +var ttlExpiredPingOutput = ` Pinging www.google.pl [8.8.8.8] with 32 bytes of data: Request timed out. Request timed out. @@ -324,7 +324,7 @@ Ping statistics for 8.8.8.8: ` func mockTTLExpiredPinger(string, float64, ...string) (string, error) { - return TTLExpiredPingOutput, errors.New("so very bad") + return ttlExpiredPingOutput, errors.New("so very bad") } // in case 'Destination net unreachable' ping app return receive packet which is not what we need diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index cc5c7024c57e8..f657404d2882e 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -21,6 +21,36 @@ import ( //go:embed sample.conf var sampleConfig string +type Postfix struct { + QueueDirectory string `toml:"queue_directory"` +} + +func (*Postfix) SampleConfig() string { + return sampleConfig +} + +func (p *Postfix) Gather(acc telegraf.Accumulator) error { + if p.QueueDirectory == "" { + var err error + p.QueueDirectory, err = getQueueDirectory() + if err != nil { + return fmt.Errorf("unable to determine queue directory: %w", err) + } + } + + for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} { + fields, err := qScan(filepath.Join(p.QueueDirectory, q), acc) + if err != nil { + acc.AddError(fmt.Errorf("error scanning queue %q: %w", q, err)) + continue + } + + acc.AddFields("postfix_queue", fields, map[string]string{"queue": q}) + } + + return nil +} + func getQueueDirectory() (string, error) { qd, err := exec.Command("postconf", "-h", "queue_directory").Output() if err != nil { @@ -75,36 +105,6 @@ func qScan(path string, acc telegraf.Accumulator) (map[string]interface{}, error return fields, nil } -type Postfix struct { - QueueDirectory string -} - -func (*Postfix) SampleConfig() string { - return sampleConfig -} - 
-func (p *Postfix) Gather(acc telegraf.Accumulator) error { - if p.QueueDirectory == "" { - var err error - p.QueueDirectory, err = getQueueDirectory() - if err != nil { - return fmt.Errorf("unable to determine queue directory: %w", err) - } - } - - for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} { - fields, err := qScan(filepath.Join(p.QueueDirectory, q), acc) - if err != nil { - acc.AddError(fmt.Errorf("error scanning queue %q: %w", q, err)) - continue - } - - acc.AddFields("postfix_queue", fields, map[string]string{"queue": q}) - } - - return nil -} - func init() { inputs.Add("postfix", func() telegraf.Input { return &Postfix{ diff --git a/plugins/inputs/postfix/postfix_windows.go b/plugins/inputs/postfix/postfix_windows.go index 3b027f24a2ade..9831787ff7194 100644 --- a/plugins/inputs/postfix/postfix_windows.go +++ b/plugins/inputs/postfix/postfix_windows.go @@ -16,11 +16,13 @@ type Postfix struct { Log telegraf.Logger `toml:"-"` } +func (*Postfix) SampleConfig() string { return sampleConfig } + func (p *Postfix) Init() error { - p.Log.Warn("current platform is not supported") + p.Log.Warn("Current platform is not supported") return nil } -func (*Postfix) SampleConfig() string { return sampleConfig } + func (*Postfix) Gather(_ telegraf.Accumulator) error { return nil } func init() { diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index dc8f37ca8d6d3..46b2354874cb2 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -16,6 +16,8 @@ import ( //go:embed sample.conf var sampleConfig string +var ignoredColumns = map[string]bool{"stats_reset": true} + type Postgresql struct { Databases []string `toml:"databases"` IgnoredDatabases []string `toml:"ignored_databases"` @@ -25,8 +27,6 @@ type Postgresql struct { service *postgresql.Service } -var ignoredColumns = map[string]bool{"stats_reset": true} - func (*Postgresql) SampleConfig() string { 
return sampleConfig } @@ -47,10 +47,6 @@ func (p *Postgresql) Start(_ telegraf.Accumulator) error { return p.service.Start() } -func (p *Postgresql) Stop() { - p.service.Stop() -} - func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var query string if len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0 { @@ -106,6 +102,10 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { return bgWriterRow.Err() } +func (p *Postgresql) Stop() { + p.service.Stop() +} + func (p *Postgresql) accRow(row *sql.Rows, acc telegraf.Accumulator, columns []string) error { var dbname bytes.Buffer diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index a4d867b8435c6..cb10f266bcedd 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -21,6 +21,8 @@ import ( //go:embed sample.conf var sampleConfig string +var ignoredColumns = map[string]bool{"stats_reset": true} + type Postgresql struct { Databases []string `deprecated:"1.22.4;use the sqlquery option to specify database to use"` Query []query `toml:"query"` @@ -45,7 +47,9 @@ type query struct { additionalTags map[string]bool } -var ignoredColumns = map[string]bool{"stats_reset": true} +type scanner interface { + Scan(dest ...interface{}) error +} func (*Postgresql) SampleConfig() string { return sampleConfig @@ -102,10 +106,6 @@ func (p *Postgresql) Start(_ telegraf.Accumulator) error { return p.service.Start() } -func (p *Postgresql) Stop() { - p.service.Stop() -} - func (p *Postgresql) Gather(acc telegraf.Accumulator) error { // Retrieving the database version query := `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'` @@ -128,6 +128,10 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { return nil } +func (p *Postgresql) Stop() { + p.service.Stop() +} + func (p *Postgresql) 
gatherMetricsFromQuery(acc telegraf.Accumulator, q query, timestamp time.Time) error { rows, err := p.service.DB.Query(q.Sqlquery) if err != nil { @@ -150,10 +154,6 @@ func (p *Postgresql) gatherMetricsFromQuery(acc telegraf.Accumulator, q query, t return nil } -type scanner interface { - Scan(dest ...interface{}) error -} - func (p *Postgresql) accRow(acc telegraf.Accumulator, row scanner, columns []string, q query, timestamp time.Time) error { // this is where we'll store the column name with its *interface{} columnMap := make(map[string]*interface{}) diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 44c765348a646..5ac8397e077fc 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -19,14 +19,13 @@ import ( //go:embed sample.conf var sampleConfig string -type Powerdns struct { - UnixSockets []string +const defaultTimeout = 5 * time.Second - Log telegraf.Logger `toml:"-"` +type Powerdns struct { + UnixSockets []string `toml:"unix_sockets"` + Log telegraf.Logger `toml:"-"` } -var defaultTimeout = 5 * time.Second - func (*Powerdns) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index 48e83179a4746..48a77518f5a6a 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -14,6 +14,8 @@ import ( //go:embed sample.conf var sampleConfig string +const defaultTimeout = 5 * time.Second + type PowerdnsRecursor struct { UnixSockets []string `toml:"unix_sockets"` SocketDir string `toml:"socket_dir"` @@ -26,8 +28,6 @@ type PowerdnsRecursor struct { gatherFromServer func(address string, acc telegraf.Accumulator) error } -var defaultTimeout = 5 * time.Second - func (*PowerdnsRecursor) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/processes/processes_notwindows.go 
b/plugins/inputs/processes/processes_notwindows.go index c574238fd5a23..e476e8ff2454f 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -19,15 +19,13 @@ import ( ) type Processes struct { - UseSudo bool `toml:"use_sudo"` + UseSudo bool `toml:"use_sudo"` + Log telegraf.Logger `toml:"-"` execPS func(UseSudo bool) ([]byte, error) readProcFile func(filename string) ([]byte, error) - - Log telegraf.Logger - - forcePS bool - forceProc bool + forcePS bool + forceProc bool } func (p *Processes) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/procstat/filter.go b/plugins/inputs/procstat/filter.go index 3c090549c0718..d8f621048b77e 100644 --- a/plugins/inputs/procstat/filter.go +++ b/plugins/inputs/procstat/filter.go @@ -7,13 +7,13 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" + telegraf_filter "github.com/influxdata/telegraf/filter" ) -type Filter struct { +type filter struct { Name string `toml:"name"` PidFiles []string `toml:"pid_files"` SystemdUnits []string `toml:"systemd_units"` @@ -29,13 +29,13 @@ type Filter struct { filterSupervisorUnit string filterCmds []*regexp.Regexp - filterUser filter.Filter - filterExecutable filter.Filter - filterProcessName filter.Filter + filterUser telegraf_filter.Filter + filterExecutable telegraf_filter.Filter + filterProcessName telegraf_filter.Filter finder *processFinder } -func (f *Filter) Init() error { +func (f *filter) init() error { if f.Name == "" { return errors.New("filter must be named") } @@ -74,13 +74,13 @@ func (f *Filter) Init() error { f.filterSupervisorUnit = strings.TrimSpace(strings.Join(f.SupervisorUnits, " ")) var err error - if f.filterUser, err = filter.Compile(f.Users); err != nil { + if f.filterUser, err = telegraf_filter.Compile(f.Users); err != nil { return 
fmt.Errorf("compiling users filter for %q failed: %w", f.Name, err) } - if f.filterExecutable, err = filter.Compile(f.Executables); err != nil { + if f.filterExecutable, err = telegraf_filter.Compile(f.Executables); err != nil { return fmt.Errorf("compiling executables filter for %q failed: %w", f.Name, err) } - if f.filterProcessName, err = filter.Compile(f.ProcessNames); err != nil { + if f.filterProcessName, err = telegraf_filter.Compile(f.ProcessNames); err != nil { return fmt.Errorf("compiling process-names filter for %q failed: %w", f.Name, err) } @@ -89,7 +89,7 @@ func (f *Filter) Init() error { return nil } -func (f *Filter) ApplyFilter() ([]processGroup, error) { +func (f *filter) applyFilter() ([]processGroup, error) { // Determine processes on service level. if there is no constraint on the // services, use all processes for matching. var groups []processGroup @@ -125,7 +125,7 @@ func (f *Filter) ApplyFilter() ([]processGroup, error) { } groups = append(groups, g...) default: - procs, err := process.Processes() + procs, err := gopsprocess.Processes() if err != nil { return nil, err } @@ -135,7 +135,7 @@ func (f *Filter) ApplyFilter() ([]processGroup, error) { // Filter by additional properties such as users, patterns etc result := make([]processGroup, 0, len(groups)) for _, g := range groups { - var matched []*process.Process + var matched []*gopsprocess.Process for _, p := range g.processes { // Users if f.filterUser != nil { @@ -218,13 +218,13 @@ func (f *Filter) ApplyFilter() ([]processGroup, error) { return result, nil } -func getChildren(p *process.Process) ([]*process.Process, error) { +func getChildren(p *gopsprocess.Process) ([]*gopsprocess.Process, error) { children, err := p.Children() // Check for cases that do not really mean error but rather means that there // is no match. 
switch { case err == nil, - errors.Is(err, process.ErrorNoChildren), + errors.Is(err, gopsprocess.ErrorNoChildren), strings.Contains(err.Error(), "exit status 1"): return children, nil } diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 5f9812782b094..192a431acd503 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -7,16 +7,16 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" ) // NativeFinder uses gopsutil to find processes type NativeFinder struct{} // Uid will return all pids for the given user -func (pg *NativeFinder) UID(user string) ([]PID, error) { - var dst []PID - procs, err := process.Processes() +func (pg *NativeFinder) uid(user string) ([]pid, error) { + var dst []pid + procs, err := gopsprocess.Processes() if err != nil { return dst, err } @@ -27,35 +27,35 @@ func (pg *NativeFinder) UID(user string) ([]PID, error) { continue } if username == user { - dst = append(dst, PID(p.Pid)) + dst = append(dst, pid(p.Pid)) } } return dst, nil } // PidFile returns the pid from the pid file given. 
-func (pg *NativeFinder) PidFile(path string) ([]PID, error) { - var pids []PID +func (pg *NativeFinder) pidFile(path string) ([]pid, error) { + var pids []pid pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("failed to read pidfile %q: %w", path, err) } - pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) + processID, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) if err != nil { return pids, err } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) return pids, nil } // FullPattern matches on the command line when the process was executed -func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) { - var pids []PID +func (pg *NativeFinder) fullPattern(pattern string) ([]pid, error) { + var pids []pid regxPattern, err := regexp.Compile(pattern) if err != nil { return pids, err } - procs, err := pg.FastProcessList() + procs, err := pg.fastProcessList() if err != nil { return pids, err } @@ -66,18 +66,18 @@ func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) { continue } if regxPattern.MatchString(cmd) { - pids = append(pids, PID(p.Pid)) + pids = append(pids, pid(p.Pid)) } } return pids, err } // Children matches children pids on the command line when the process was executed -func (pg *NativeFinder) Children(pid PID) ([]PID, error) { +func (pg *NativeFinder) children(processID pid) ([]pid, error) { // Get all running processes - p, err := process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(processID)) if err != nil { - return nil, fmt.Errorf("getting process %d failed: %w", pid, err) + return nil, fmt.Errorf("getting process %d failed: %w", processID, err) } // Get all children of the current process @@ -85,35 +85,35 @@ func (pg *NativeFinder) Children(pid PID) ([]PID, error) { if err != nil { return nil, fmt.Errorf("unable to get children of process %d: %w", p.Pid, err) } - pids := make([]PID, 0, len(children)) + pids := 
make([]pid, 0, len(children)) for _, child := range children { - pids = append(pids, PID(child.Pid)) + pids = append(pids, pid(child.Pid)) } return pids, err } -func (pg *NativeFinder) FastProcessList() ([]*process.Process, error) { - pids, err := process.Pids() +func (pg *NativeFinder) fastProcessList() ([]*gopsprocess.Process, error) { + pids, err := gopsprocess.Pids() if err != nil { return nil, err } - result := make([]*process.Process, 0, len(pids)) + result := make([]*gopsprocess.Process, 0, len(pids)) for _, pid := range pids { - result = append(result, &process.Process{Pid: pid}) + result = append(result, &gopsprocess.Process{Pid: pid}) } return result, nil } // Pattern matches on the process name -func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { - var pids []PID +func (pg *NativeFinder) pattern(pattern string) ([]pid, error) { + var pids []pid regxPattern, err := regexp.Compile(pattern) if err != nil { return pids, err } - procs, err := pg.FastProcessList() + procs, err := pg.fastProcessList() if err != nil { return pids, err } @@ -124,7 +124,7 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { continue } if regxPattern.MatchString(name) { - pids = append(pids, PID(p.Pid)) + pids = append(pids, pid(p.Pid)) } } return pids, err diff --git a/plugins/inputs/procstat/native_finder_test.go b/plugins/inputs/procstat/native_finder_test.go index 1e6c6d84ade0c..e4e6e0bb8726d 100644 --- a/plugins/inputs/procstat/native_finder_test.go +++ b/plugins/inputs/procstat/native_finder_test.go @@ -14,7 +14,7 @@ import ( func BenchmarkPattern(b *testing.B) { finder := &NativeFinder{} for n := 0; n < b.N; n++ { - _, err := finder.Pattern(".*") + _, err := finder.pattern(".*") require.NoError(b, err) } } @@ -22,7 +22,7 @@ func BenchmarkPattern(b *testing.B) { func BenchmarkFullPattern(b *testing.B) { finder := &NativeFinder{} for n := 0; n < b.N; n++ { - _, err := finder.FullPattern(".*") + _, err := finder.fullPattern(".*") require.NoError(b, 
err) } } @@ -37,26 +37,26 @@ func TestChildPattern(t *testing.T) { require.NoError(t, err) // Spawn two child processes and get their PIDs - expected := make([]PID, 0, 2) + expected := make([]pid, 0, 2) ctx, cancel := context.WithCancel(context.Background()) defer cancel() // First process cmd1 := exec.CommandContext(ctx, "/bin/sh") require.NoError(t, cmd1.Start(), "starting first command failed") - expected = append(expected, PID(cmd1.Process.Pid)) + expected = append(expected, pid(cmd1.Process.Pid)) // Second process cmd2 := exec.CommandContext(ctx, "/bin/sh") require.NoError(t, cmd2.Start(), "starting first command failed") - expected = append(expected, PID(cmd2.Process.Pid)) + expected = append(expected, pid(cmd2.Process.Pid)) // Use the plugin to find the children finder := &NativeFinder{} - parent, err := finder.Pattern(parentName) + parent, err := finder.pattern(parentName) require.NoError(t, err) require.Len(t, parent, 1) - children, err := finder.Children(parent[0]) + children, err := finder.children(parent[0]) require.NoError(t, err) require.ElementsMatch(t, expected, children) } @@ -66,7 +66,7 @@ func TestGather_RealPatternIntegration(t *testing.T) { t.Skip("Skipping integration test in short mode") } pg := &NativeFinder{} - pids, err := pg.Pattern(`procstat`) + pids, err := pg.pattern(`procstat`) require.NoError(t, err) require.NotEmpty(t, pids) } @@ -79,7 +79,7 @@ func TestGather_RealFullPatternIntegration(t *testing.T) { t.Skip("Skipping integration test on Non-Windows OS") } pg := &NativeFinder{} - pids, err := pg.FullPattern(`%procstat%`) + pids, err := pg.fullPattern(`%procstat%`) require.NoError(t, err) require.NotEmpty(t, pids) } @@ -92,7 +92,7 @@ func TestGather_RealUserIntegration(t *testing.T) { require.NoError(t, err) pg := &NativeFinder{} - pids, err := pg.UID(currentUser.Username) + pids, err := pg.uid(currentUser.Username) require.NoError(t, err) require.NotEmpty(t, pids) } diff --git a/plugins/inputs/procstat/os_linux.go 
b/plugins/inputs/procstat/os_linux.go index 6c9d906faa276..cec134ee33232 100644 --- a/plugins/inputs/procstat/os_linux.go +++ b/plugins/inputs/procstat/os_linux.go @@ -13,15 +13,15 @@ import ( "github.com/coreos/go-systemd/v22/dbus" "github.com/prometheus/procfs" - "github.com/shirou/gopsutil/v4/net" - "github.com/shirou/gopsutil/v4/process" + gopsnet "github.com/shirou/gopsutil/v4/net" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" "github.com/influxdata/telegraf/internal" ) -func processName(p *process.Process) (string, error) { +func processName(p *gopsprocess.Process) (string, error) { return p.Exe() } @@ -29,7 +29,7 @@ func queryPidWithWinServiceName(_ string) (uint32, error) { return 0, errors.New("os not supporting win_service option") } -func collectMemmap(proc Process, prefix string, fields map[string]any) { +func collectMemmap(proc process, prefix string, fields map[string]any) { memMapStats, err := proc.MemoryMaps(true) if err == nil && len(*memMapStats) == 1 { memMap := (*memMapStats)[0] @@ -70,12 +70,12 @@ func findBySystemdUnits(units []string) ([]processGroup, error) { if !ok { return nil, fmt.Errorf("failed to parse PID %v of unit %q: invalid type %T", raw, u, raw) } - p, err := process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil { return nil, fmt.Errorf("failed to find process for PID %d of unit %q: %w", pid, u, err) } groups = append(groups, processGroup{ - processes: []*process.Process{p}, + processes: []*gopsprocess.Process{p}, tags: map[string]string{"systemd_unit": u.Name}, }) } @@ -87,14 +87,14 @@ func findByWindowsServices(_ []string) ([]processGroup, error) { return nil, nil } -func collectTotalReadWrite(proc Process) (r, w uint64, err error) { +func collectTotalReadWrite(proc process) (r, w uint64, err error) { path := internal.GetProcPath() fs, err := procfs.NewFS(path) if err != nil { return 0, 0, err } - p, err := 
fs.Proc(int(proc.PID())) + p, err := fs.Proc(int(proc.pid())) if err != nil { return 0, 0, err } @@ -177,7 +177,7 @@ func mapFdToInode(pid int32, fd uint32) (uint32, error) { return uint32(inode), nil } -func statsTCP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{}, error) { +func statsTCP(conns []gopsnet.ConnectionStat, family uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -185,7 +185,7 @@ func statsTCP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{ // For TCP we need the inode for each connection to relate the connection // statistics to the actual process socket. Therefore, map the // file-descriptors to inodes using the /proc//fd entries. - inodes := make(map[uint32]net.ConnectionStat, len(conns)) + inodes := make(map[uint32]gopsnet.ConnectionStat, len(conns)) for _, c := range conns { inode, err := mapFdToInode(c.Pid, c.Fd) if err != nil { @@ -240,7 +240,7 @@ func statsTCP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{ return fieldslist, nil } -func statsUDP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{}, error) { +func statsUDP(conns []gopsnet.ConnectionStat, family uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -248,7 +248,7 @@ func statsUDP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{ // For UDP we need the inode for each connection to relate the connection // statistics to the actual process socket. Therefore, map the // file-descriptors to inodes using the /proc//fd entries. 
- inodes := make(map[uint32]net.ConnectionStat, len(conns)) + inodes := make(map[uint32]gopsnet.ConnectionStat, len(conns)) for _, c := range conns { inode, err := mapFdToInode(c.Pid, c.Fd) if err != nil { @@ -299,7 +299,7 @@ func statsUDP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{ return fieldslist, nil } -func statsUnix(conns []net.ConnectionStat) ([]map[string]interface{}, error) { +func statsUnix(conns []gopsnet.ConnectionStat) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -307,7 +307,7 @@ func statsUnix(conns []net.ConnectionStat) ([]map[string]interface{}, error) { // We need to read the inode for each connection to relate the connection // statistics to the actual process socket. Therefore, map the // file-descriptors to inodes using the /proc//fd entries. - inodes := make(map[uint32]net.ConnectionStat, len(conns)) + inodes := make(map[uint32]gopsnet.ConnectionStat, len(conns)) for _, c := range conns { inode, err := mapFdToInode(c.Pid, c.Fd) if err != nil { diff --git a/plugins/inputs/procstat/os_others.go b/plugins/inputs/procstat/os_others.go index 62334f885ccda..ba34038072a21 100644 --- a/plugins/inputs/procstat/os_others.go +++ b/plugins/inputs/procstat/os_others.go @@ -6,11 +6,11 @@ import ( "errors" "syscall" - "github.com/shirou/gopsutil/v4/net" - "github.com/shirou/gopsutil/v4/process" + gopsnet "github.com/shirou/gopsutil/v4/net" + gopsprocess "github.com/shirou/gopsutil/v4/process" ) -func processName(p *process.Process) (string, error) { +func processName(p *gopsprocess.Process) (string, error) { return p.Exe() } @@ -18,7 +18,7 @@ func queryPidWithWinServiceName(string) (uint32, error) { return 0, errors.New("os not supporting win_service option") } -func collectMemmap(Process, string, map[string]any) {} +func collectMemmap(process, string, map[string]any) {} func findBySystemdUnits([]string) ([]processGroup, error) { return nil, nil @@ -28,11 +28,11 @@ func findByWindowsServices([]string) 
([]processGroup, error) { return nil, nil } -func collectTotalReadWrite(Process) (r, w uint64, err error) { +func collectTotalReadWrite(process) (r, w uint64, err error) { return 0, 0, errors.ErrUnsupported } -func statsTCP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { +func statsTCP(conns []gopsnet.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -65,7 +65,7 @@ func statsTCP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, er return fieldslist, nil } -func statsUDP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { +func statsUDP(conns []gopsnet.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -98,6 +98,6 @@ func statsUDP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, er return fieldslist, nil } -func statsUnix([]net.ConnectionStat) ([]map[string]interface{}, error) { +func statsUnix([]gopsnet.ConnectionStat) ([]map[string]interface{}, error) { return nil, errors.ErrUnsupported } diff --git a/plugins/inputs/procstat/os_windows.go b/plugins/inputs/procstat/os_windows.go index 05ada5a4748bc..b15e424d405f7 100644 --- a/plugins/inputs/procstat/os_windows.go +++ b/plugins/inputs/procstat/os_windows.go @@ -8,13 +8,13 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v4/net" - "github.com/shirou/gopsutil/v4/process" + gopsnet "github.com/shirou/gopsutil/v4/net" + gopsprocess "github.com/shirou/gopsutil/v4/process" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) -func processName(p *process.Process) (string, error) { +func processName(p *gopsprocess.Process) (string, error) { return p.Name() } @@ -57,7 +57,7 @@ func queryPidWithWinServiceName(winServiceName string) (uint32, error) { return p.ProcessId, nil } -func collectMemmap(Process, string, map[string]any) {} +func collectMemmap(process, string, map[string]any) {} func findBySystemdUnits([]string) 
([]processGroup, error) { return nil, nil @@ -71,13 +71,13 @@ func findByWindowsServices(services []string) ([]processGroup, error) { return nil, fmt.Errorf("failed to query PID of service %q: %w", service, err) } - p, err := process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil { return nil, fmt.Errorf("failed to find process for PID %d of service %q: %w", pid, service, err) } groups = append(groups, processGroup{ - processes: []*process.Process{p}, + processes: []*gopsprocess.Process{p}, tags: map[string]string{"win_service": service}, }) } @@ -85,11 +85,11 @@ func findByWindowsServices(services []string) ([]processGroup, error) { return groups, nil } -func collectTotalReadWrite(Process) (r, w uint64, err error) { +func collectTotalReadWrite(process) (r, w uint64, err error) { return 0, 0, errors.ErrUnsupported } -func statsTCP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { +func statsTCP(conns []gopsnet.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -122,7 +122,7 @@ func statsTCP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, er return fieldslist, nil } -func statsUDP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { +func statsUDP(conns []gopsnet.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -155,6 +155,6 @@ func statsUDP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, er return fieldslist, nil } -func statsUnix([]net.ConnectionStat) ([]map[string]interface{}, error) { +func statsUnix([]gopsnet.ConnectionStat) ([]map[string]interface{}, error) { return nil, nil } diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 8451210e94530..add3a2dfb120d 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -11,54 +11,54 @@ import ( ) // Implementation of PIDGatherer that 
execs pgrep to find processes -type Pgrep struct { +type pgrep struct { path string } -func newPgrepFinder() (PIDFinder, error) { +func newPgrepFinder() (pidFinder, error) { path, err := exec.LookPath("pgrep") if err != nil { return nil, fmt.Errorf("could not find pgrep binary: %w", err) } - return &Pgrep{path}, nil + return &pgrep{path}, nil } -func (pg *Pgrep) PidFile(path string) ([]PID, error) { - var pids []PID +func (pg *pgrep) pidFile(path string) ([]pid, error) { + var pids []pid pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("failed to read pidfile %q: %w", path, err) } - pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) + processID, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) if err != nil { return pids, err } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) return pids, nil } -func (pg *Pgrep) Pattern(pattern string) ([]PID, error) { +func (pg *pgrep) pattern(pattern string) ([]pid, error) { args := []string{pattern} return pg.find(args) } -func (pg *Pgrep) UID(user string) ([]PID, error) { +func (pg *pgrep) uid(user string) ([]pid, error) { args := []string{"-u", user} return pg.find(args) } -func (pg *Pgrep) FullPattern(pattern string) ([]PID, error) { +func (pg *pgrep) fullPattern(pattern string) ([]pid, error) { args := []string{"-f", pattern} return pg.find(args) } -func (pg *Pgrep) Children(pid PID) ([]PID, error) { +func (pg *pgrep) children(pid pid) ([]pid, error) { args := []string{"-P", strconv.FormatInt(int64(pid), 10)} return pg.find(args) } -func (pg *Pgrep) find(args []string) ([]PID, error) { +func (pg *pgrep) find(args []string) ([]pid, error) { // Execute pgrep with the given arguments buf, err := exec.Command(pg.path, args...).Output() if err != nil { @@ -73,13 +73,13 @@ func (pg *Pgrep) find(args []string) ([]PID, error) { // Parse the command output to extract the PIDs fields := strings.Fields(out) - pids := make([]PID, 0, len(fields)) 
+ pids := make([]pid, 0, len(fields)) for _, field := range fields { - pid, err := strconv.ParseInt(field, 10, 32) + processID, err := strconv.ParseInt(field, 10, 32) if err != nil { return nil, err } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) } return pids, nil } diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index a0e8e60c880f0..c5eeb831d8b73 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -9,41 +9,41 @@ import ( "time" gopsnet "github.com/shirou/gopsutil/v4/net" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" ) -type Process interface { - PID() PID +type process interface { Name() (string, error) - SetTag(string, string) - MemoryMaps(bool) (*[]process.MemoryMapsStat, error) - Metrics(string, *collectionConfig, time.Time) ([]telegraf.Metric, error) + MemoryMaps(bool) (*[]gopsprocess.MemoryMapsStat, error) + pid() pid + setTag(string, string) + metrics(string, *collectionConfig, time.Time) ([]telegraf.Metric, error) } -type PIDFinder interface { - PidFile(path string) ([]PID, error) - Pattern(pattern string) ([]PID, error) - UID(user string) ([]PID, error) - FullPattern(path string) ([]PID, error) - Children(pid PID) ([]PID, error) +type pidFinder interface { + pidFile(path string) ([]pid, error) + pattern(pattern string) ([]pid, error) + uid(user string) ([]pid, error) + fullPattern(path string) ([]pid, error) + children(pid pid) ([]pid, error) } -type Proc struct { +type proc struct { hasCPUTimes bool tags map[string]string - *process.Process + *gopsprocess.Process } -func newProc(pid PID) (Process, error) { - p, err := process.NewProcess(int32(pid)) +func newProc(pid pid) (process, error) { + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil { return nil, err } - proc := &Proc{ + proc := &proc{ Process: p, hasCPUTimes: false, 
tags: make(map[string]string), @@ -51,15 +51,15 @@ func newProc(pid PID) (Process, error) { return proc, nil } -func (p *Proc) PID() PID { - return PID(p.Process.Pid) +func (p *proc) pid() pid { + return pid(p.Process.Pid) } -func (p *Proc) SetTag(k, v string) { +func (p *proc) setTag(k, v string) { p.tags[k] = v } -func (p *Proc) percent(_ time.Duration) (float64, error) { +func (p *proc) percent(_ time.Duration) (float64, error) { cpuPerc, err := p.Process.Percent(time.Duration(0)) if !p.hasCPUTimes && err == nil { p.hasCPUTimes = true @@ -68,8 +68,8 @@ func (p *Proc) percent(_ time.Duration) (float64, error) { return cpuPerc, err } -// Add metrics a single Process -func (p *Proc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([]telegraf.Metric, error) { +// Add metrics a single process +func (p *proc) metrics(prefix string, cfg *collectionConfig, t time.Time) ([]telegraf.Metric, error) { if prefix != "" { prefix += "_" } @@ -163,27 +163,27 @@ func (p *Proc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([]tel for _, rlim := range rlims { var name string switch rlim.Resource { - case process.RLIMIT_CPU: + case gopsprocess.RLIMIT_CPU: name = "cpu_time" - case process.RLIMIT_DATA: + case gopsprocess.RLIMIT_DATA: name = "memory_data" - case process.RLIMIT_STACK: + case gopsprocess.RLIMIT_STACK: name = "memory_stack" - case process.RLIMIT_RSS: + case gopsprocess.RLIMIT_RSS: name = "memory_rss" - case process.RLIMIT_NOFILE: + case gopsprocess.RLIMIT_NOFILE: name = "num_fds" - case process.RLIMIT_MEMLOCK: + case gopsprocess.RLIMIT_MEMLOCK: name = "memory_locked" - case process.RLIMIT_AS: + case gopsprocess.RLIMIT_AS: name = "memory_vms" - case process.RLIMIT_LOCKS: + case gopsprocess.RLIMIT_LOCKS: name = "file_locks" - case process.RLIMIT_SIGPENDING: + case gopsprocess.RLIMIT_SIGPENDING: name = "signals_pending" - case process.RLIMIT_NICE: + case gopsprocess.RLIMIT_NICE: name = "nice_priority" - case process.RLIMIT_RTPRIO: + case 
gopsprocess.RLIMIT_RTPRIO: name = "realtime_priority" default: continue diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 4e3e4df6d38c0..6bf1e8402dc69 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -15,7 +15,7 @@ import ( "strings" "time" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/choice" @@ -28,14 +28,7 @@ var sampleConfig string // execCommand is so tests can mock out exec.Command usage. var execCommand = exec.Command -type PID int32 - -type collectionConfig struct { - solarisMode bool - tagging map[string]bool - features map[string]bool - socketProtos []string -} +type pid int32 type Procstat struct { PidFinder string `toml:"pid_finder"` @@ -57,24 +50,31 @@ type Procstat struct { Properties []string `toml:"properties"` SocketProtocols []string `toml:"socket_protocols"` TagWith []string `toml:"tag_with"` - Filter []Filter `toml:"filter"` + Filter []filter `toml:"filter"` Log telegraf.Logger `toml:"-"` - finder PIDFinder - processes map[PID]Process + finder pidFinder + processes map[pid]process cfg collectionConfig oldMode bool - createProcess func(PID) (Process, error) + createProcess func(pid) (process, error) +} + +type collectionConfig struct { + solarisMode bool + tagging map[string]bool + features map[string]bool + socketProtos []string } -type PidsTags struct { - PIDs []PID +type pidsTags struct { + PIDs []pid Tags map[string]string } type processGroup struct { - processes []*process.Process + processes []*gopsprocess.Process tags map[string]string } @@ -196,14 +196,14 @@ func (p *Procstat) Init() error { // New-style operations for i := range p.Filter { p.Filter[i].Log = p.Log - if err := p.Filter[i].Init(); err != nil { + if err := p.Filter[i].init(); err != nil { return fmt.Errorf("initializing filter %d failed: %w", i, err) } } } // 
Initialize the running process cache - p.processes = make(map[PID]Process) + p.processes = make(map[pid]process) return nil } @@ -240,7 +240,7 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error { } var count int - running := make(map[PID]bool) + running := make(map[pid]bool) for _, r := range results { if len(r.PIDs) < 1 && len(p.SupervisorUnits) > 0 { continue @@ -271,16 +271,16 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error { // Add initial tags for k, v := range r.Tags { - proc.SetTag(k, v) + proc.setTag(k, v) } if p.ProcessName != "" { - proc.SetTag("process_name", p.ProcessName) + proc.setTag("process_name", p.ProcessName) } p.processes[pid] = proc } running[pid] = true - metrics, err := proc.Metrics(p.Prefix, &p.cfg, now) + metrics, err := proc.metrics(p.Prefix, &p.cfg, now) if err != nil { // Continue after logging an error as there might still be // metrics available @@ -324,9 +324,9 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error { func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { now := time.Now() - running := make(map[PID]bool) + running := make(map[pid]bool) for _, f := range p.Filter { - groups, err := f.ApplyFilter() + groups, err := f.applyFilter() if err != nil { // Add lookup error-metric acc.AddFields( @@ -357,8 +357,8 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { // Use the cached processes as we need the existing instances // to compute delta-metrics (e.g. cpu-usage). 
- pid := PID(gp.Pid) - proc, found := p.processes[pid] + pid := pid(gp.Pid) + process, found := p.processes[pid] if !found { //nolint:errcheck // Assumption: if a process has no name, it probably does not exist if name, _ := gp.Name(); name == "" { @@ -372,19 +372,19 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { tags[k] = v } if p.ProcessName != "" { - proc.SetTag("process_name", p.ProcessName) + process.setTag("process_name", p.ProcessName) } tags["filter"] = f.Name - proc = &Proc{ + process = &proc{ Process: gp, hasCPUTimes: false, tags: tags, } - p.processes[pid] = proc + p.processes[pid] = process } running[pid] = true - metrics, err := proc.Metrics(p.Prefix, &p.cfg, now) + metrics, err := process.metrics(p.Prefix, &p.cfg, now) if err != nil { // Continue after logging an error as there might still be // metrics available @@ -422,7 +422,7 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { } // Get matching PIDs and their initial tags -func (p *Procstat) findPids() ([]PidsTags, error) { +func (p *Procstat) findPids() ([]pidsTags, error) { switch { case len(p.SupervisorUnits) > 0: return p.findSupervisorUnits() @@ -434,65 +434,65 @@ func (p *Procstat) findPids() ([]PidsTags, error) { return nil, err } tags := map[string]string{"win_service": p.WinService} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil case p.CGroup != "": return p.cgroupPIDs() case p.PidFile != "": - pids, err := p.finder.PidFile(p.PidFile) + pids, err := p.finder.pidFile(p.PidFile) if err != nil { return nil, err } tags := map[string]string{"pidfile": p.PidFile} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil case p.Exe != "": - pids, err := p.finder.Pattern(p.Exe) + pids, err := p.finder.pattern(p.Exe) if err != nil { return nil, err } tags := map[string]string{"exe": p.Exe} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil case p.Pattern != "": - pids, err := 
p.finder.FullPattern(p.Pattern) + pids, err := p.finder.fullPattern(p.Pattern) if err != nil { return nil, err } tags := map[string]string{"pattern": p.Pattern} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil case p.User != "": - pids, err := p.finder.UID(p.User) + pids, err := p.finder.uid(p.User) if err != nil { return nil, err } tags := map[string]string{"user": p.User} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil } return nil, errors.New("no filter option set") } -func (p *Procstat) findSupervisorUnits() ([]PidsTags, error) { +func (p *Procstat) findSupervisorUnits() ([]pidsTags, error) { groups, groupsTags, err := p.supervisorPIDs() if err != nil { return nil, fmt.Errorf("getting supervisor PIDs failed: %w", err) } // According to the PID, find the system process number and get the child processes - pidTags := make([]PidsTags, 0, len(groups)) + pidTags := make([]pidsTags, 0, len(groups)) for _, group := range groups { grppid := groupsTags[group]["pid"] if grppid == "" { - pidTags = append(pidTags, PidsTags{nil, groupsTags[group]}) + pidTags = append(pidTags, pidsTags{nil, groupsTags[group]}) continue } - pid, err := strconv.ParseInt(grppid, 10, 32) + processID, err := strconv.ParseInt(grppid, 10, 32) if err != nil { return nil, fmt.Errorf("converting PID %q failed: %w", grppid, err) } // Get all children of the supervisor unit - pids, err := p.finder.Children(PID(pid)) + pids, err := p.finder.children(pid(processID)) if err != nil { - return nil, fmt.Errorf("getting children for %d failed: %w", pid, err) + return nil, fmt.Errorf("getting children for %d failed: %w", processID, err) } tags := map[string]string{"pattern": p.Pattern, "parent_pid": p.Pattern} @@ -510,7 +510,7 @@ func (p *Procstat) findSupervisorUnits() ([]PidsTags, error) { } // Remove duplicate pid tags delete(tags, "pid") - pidTags = append(pidTags, PidsTags{pids, tags}) + pidTags = append(pidTags, pidsTags{pids, tags}) } return 
pidTags, nil } @@ -559,30 +559,30 @@ func (p *Procstat) supervisorPIDs() ([]string, map[string]map[string]string, err return p.SupervisorUnits, mainPids, nil } -func (p *Procstat) systemdUnitPIDs() ([]PidsTags, error) { +func (p *Procstat) systemdUnitPIDs() ([]pidsTags, error) { if p.IncludeSystemdChildren { p.CGroup = "systemd/system.slice/" + p.SystemdUnit return p.cgroupPIDs() } - var pidTags []PidsTags + var pidTags []pidsTags pids, err := p.simpleSystemdUnitPIDs() if err != nil { return nil, err } tags := map[string]string{"systemd_unit": p.SystemdUnit} - pidTags = append(pidTags, PidsTags{pids, tags}) + pidTags = append(pidTags, pidsTags{pids, tags}) return pidTags, nil } -func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { +func (p *Procstat) simpleSystemdUnitPIDs() ([]pid, error) { out, err := execCommand("systemctl", "show", p.SystemdUnit).Output() if err != nil { return nil, err } lines := bytes.Split(out, []byte{'\n'}) - pids := make([]PID, 0, len(lines)) + pids := make([]pid, 0, len(lines)) for _, line := range lines { kv := bytes.SplitN(line, []byte{'='}, 2) if len(kv) != 2 { @@ -594,17 +594,17 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { if len(kv[1]) == 0 || bytes.Equal(kv[1], []byte("0")) { return nil, nil } - pid, err := strconv.ParseInt(string(kv[1]), 10, 32) + processID, err := strconv.ParseInt(string(kv[1]), 10, 32) if err != nil { return nil, fmt.Errorf("invalid pid %q", kv[1]) } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) } return pids, nil } -func (p *Procstat) cgroupPIDs() ([]PidsTags, error) { +func (p *Procstat) cgroupPIDs() ([]pidsTags, error) { procsPath := p.CGroup if procsPath[0] != '/' { procsPath = "/sys/fs/cgroup/" + procsPath @@ -615,20 +615,20 @@ func (p *Procstat) cgroupPIDs() ([]PidsTags, error) { return nil, fmt.Errorf("glob failed: %w", err) } - pidTags := make([]PidsTags, 0, len(items)) + pidTags := make([]pidsTags, 0, len(items)) for _, item := range items { pids, err := 
p.singleCgroupPIDs(item) if err != nil { return nil, err } tags := map[string]string{"cgroup": p.CGroup, "cgroup_full": item} - pidTags = append(pidTags, PidsTags{pids, tags}) + pidTags = append(pidTags, pidsTags{pids, tags}) } return pidTags, nil } -func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { +func (p *Procstat) singleCgroupPIDs(path string) ([]pid, error) { ok, err := isDir(path) if err != nil { return nil, err @@ -643,16 +643,16 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { } lines := bytes.Split(out, []byte{'\n'}) - pids := make([]PID, 0, len(lines)) + pids := make([]pid, 0, len(lines)) for _, pidBS := range lines { if len(pidBS) == 0 { continue } - pid, err := strconv.ParseInt(string(pidBS), 10, 32) + processID, err := strconv.ParseInt(string(pidBS), 10, 32) if err != nil { return nil, fmt.Errorf("invalid pid %q", pidBS) } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) } return pids, nil @@ -666,15 +666,15 @@ func isDir(path string) (bool, error) { return result.IsDir(), nil } -func (p *Procstat) winServicePIDs() ([]PID, error) { - var pids []PID +func (p *Procstat) winServicePIDs() ([]pid, error) { + var pids []pid - pid, err := queryPidWithWinServiceName(p.WinService) + processID, err := queryPidWithWinServiceName(p.WinService) if err != nil { return pids, err } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) return pids, nil } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index aa833a86f9b24..4256f08e24234 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -77,73 +77,69 @@ TestGather_STARTINGsupervisorUnitPIDs STARTING`) } type testPgrep struct { - pids []PID 
+ pids []pid err error } -func newTestFinder(pids []PID) PIDFinder { +func newTestFinder(pids []pid) pidFinder { return &testPgrep{ pids: pids, err: nil, } } -func (pg *testPgrep) PidFile(_ string) ([]PID, error) { +func (pg *testPgrep) pidFile(_ string) ([]pid, error) { return pg.pids, pg.err } -func (p *testProc) Cmdline() (string, error) { - return "test_proc", nil -} - -func (pg *testPgrep) Pattern(_ string) ([]PID, error) { +func (pg *testPgrep) pattern(_ string) ([]pid, error) { return pg.pids, pg.err } -func (pg *testPgrep) UID(_ string) ([]PID, error) { +func (pg *testPgrep) uid(_ string) ([]pid, error) { return pg.pids, pg.err } -func (pg *testPgrep) FullPattern(_ string) ([]PID, error) { +func (pg *testPgrep) fullPattern(_ string) ([]pid, error) { return pg.pids, pg.err } -func (pg *testPgrep) Children(_ PID) ([]PID, error) { - pids := []PID{7311, 8111, 8112} +func (pg *testPgrep) children(_ pid) ([]pid, error) { + pids := []pid{7311, 8111, 8112} return pids, pg.err } type testProc struct { - pid PID - tags map[string]string + procID pid + tags map[string]string } -func newTestProc(pid PID) (Process, error) { +func newTestProc(pid pid) (process, error) { proc := &testProc{ - pid: pid, - tags: make(map[string]string), + procID: pid, + tags: make(map[string]string), } return proc, nil } -func (p *testProc) PID() PID { - return p.pid +func (p *testProc) pid() pid { + return p.procID } func (p *testProc) Name() (string, error) { return "test_proc", nil } -func (p *testProc) SetTag(k, v string) { +func (p *testProc) setTag(k, v string) { p.tags[k] = v } -func (p *testProc) MemoryMaps(bool) (*[]process.MemoryMapsStat, error) { - stats := make([]process.MemoryMapsStat, 0) +func (p *testProc) MemoryMaps(bool) (*[]gopsprocess.MemoryMapsStat, error) { + stats := make([]gopsprocess.MemoryMapsStat, 0) return &stats, nil } -func (p *testProc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([]telegraf.Metric, error) { +func (p *testProc) metrics(prefix 
string, cfg *collectionConfig, t time.Time) ([]telegraf.Metric, error) { if prefix != "" { prefix += "_" } @@ -190,9 +186,9 @@ func (p *testProc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([ } if cfg.tagging["pid"] { - tags["pid"] = strconv.Itoa(int(p.pid)) + tags["pid"] = strconv.Itoa(int(p.procID)) } else { - fields["pid"] = int32(p.pid) + fields["pid"] = int32(p.procID) } if cfg.tagging["ppid"] { @@ -216,7 +212,7 @@ func (p *testProc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([ return []telegraf.Metric{metric.New("procstat", tags, fields, t)}, nil } -var pid = PID(42) +var processID = pid(42) var exe = "foo" func TestInitInvalidFinder(t *testing.T) { @@ -277,8 +273,8 @@ func TestGather_CreateProcessErrorOk(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), - createProcess: func(PID) (Process, error) { + finder: newTestFinder([]pid{processID}), + createProcess: func(pid) (process, error) { return nil, errors.New("createProcess error") }, } @@ -350,7 +346,7 @@ func TestGather_ProcessName(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -362,14 +358,14 @@ func TestGather_ProcessName(t *testing.T) { } func TestGather_NoProcessNameUsesReal(t *testing.T) { - pid := PID(os.Getpid()) + processID := pid(os.Getpid()) p := Procstat{ Exe: exe, PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -386,7 +382,7 @@ func TestGather_NoPidTag(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + 
finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -405,7 +401,7 @@ func TestGather_PidTag(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -424,7 +420,7 @@ func TestGather_Prefix(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -441,7 +437,7 @@ func TestGather_Exe(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -460,7 +456,7 @@ func TestGather_User(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -479,7 +475,7 @@ func TestGather_Pattern(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -498,7 +494,7 @@ func TestGather_PidFile(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -510,7 +506,7 @@ func TestGather_PidFile(t *testing.T) { } func TestGather_PercentFirstPass(t *testing.T) { - pid := PID(os.Getpid()) + processID := pid(os.Getpid()) p := 
Procstat{ Pattern: "foo", @@ -518,7 +514,7 @@ func TestGather_PercentFirstPass(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newProc, } require.NoError(t, p.Init()) @@ -531,7 +527,7 @@ func TestGather_PercentFirstPass(t *testing.T) { } func TestGather_PercentSecondPass(t *testing.T) { - pid := PID(os.Getpid()) + processID := pid(os.Getpid()) p := Procstat{ Pattern: "foo", @@ -539,7 +535,7 @@ func TestGather_PercentSecondPass(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newProc, } require.NoError(t, p.Init()) @@ -558,7 +554,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), } require.NoError(t, p.Init()) @@ -566,7 +562,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) { require.NoError(t, err) for _, pidsTag := range pidsTags { - require.Equal(t, []PID{11408}, pidsTag.PIDs) + require.Equal(t, []pid{11408}, pidsTag.PIDs) require.Equal(t, "TestGather_systemdUnitPIDs", pidsTag.Tags["systemd_unit"]) } } @@ -585,14 +581,14 @@ func TestGather_cgroupPIDs(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), } require.NoError(t, p.Init()) pidsTags, err := p.findPids() require.NoError(t, err) for _, pidsTag := range pidsTags { - require.Equal(t, []PID{1234, 5678}, pidsTag.PIDs) + require.Equal(t, []pid{1234, 5678}, pidsTag.PIDs) require.Equal(t, td, pidsTag.Tags["cgroup"]) } } @@ -603,7 +599,7 @@ func TestProcstatLookupMetric(t *testing.T) { PidFinder: "test", 
Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{543}), + finder: newTestFinder([]pid{543}), createProcess: newProc, } require.NoError(t, p.Init()) @@ -621,7 +617,7 @@ func TestGather_SameTimestamps(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -641,14 +637,14 @@ func TestGather_supervisorUnitPIDs(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), } require.NoError(t, p.Init()) pidsTags, err := p.findPids() require.NoError(t, err) for _, pidsTag := range pidsTags { - require.Equal(t, []PID{7311, 8111, 8112}, pidsTag.PIDs) + require.Equal(t, []pid{7311, 8111, 8112}, pidsTag.PIDs) require.Equal(t, "TestGather_supervisorUnitPIDs", pidsTag.Tags["supervisor_unit"]) } } @@ -659,7 +655,7 @@ func TestGather_MoresupervisorUnitPIDs(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), } require.NoError(t, p.Init()) diff --git a/plugins/inputs/procstat/service_finders.go b/plugins/inputs/procstat/service_finders.go index 169c64f70957c..df9bc039d326f 100644 --- a/plugins/inputs/procstat/service_finders.go +++ b/plugins/inputs/procstat/service_finders.go @@ -8,8 +8,9 @@ import ( "strconv" "strings" + gopsprocess "github.com/shirou/gopsutil/v4/process" + "github.com/influxdata/telegraf" - "github.com/shirou/gopsutil/v4/process" ) type processFinder struct { @@ -36,13 +37,13 @@ func (f *processFinder) findByPidFiles(paths []string) ([]processGroup, error) { return nil, fmt.Errorf("failed to parse PID in file %q: %w", path, err) } - p, err := 
process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil && !f.errPidFiles[path] { f.log.Errorf("failed to find process for PID %d of file %q: %v", pid, path, err) f.errPidFiles[path] = true } groups = append(groups, processGroup{ - processes: []*process.Process{p}, + processes: []*gopsprocess.Process{p}, tags: map[string]string{"pidfile": path}, }) } @@ -76,7 +77,7 @@ func findByCgroups(cgroups []string) ([]processGroup, error) { return nil, err } lines := bytes.Split(buf, []byte{'\n'}) - procs := make([]*process.Process, 0, len(lines)) + procs := make([]*gopsprocess.Process, 0, len(lines)) for _, l := range lines { l := strings.TrimSpace(string(l)) if len(l) == 0 { @@ -86,7 +87,7 @@ func findByCgroups(cgroups []string) ([]processGroup, error) { if err != nil { return nil, fmt.Errorf("failed to parse PID %q in file %q", l, fpath) } - p, err := process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil { return nil, fmt.Errorf("failed to find process for PID %d of %q: %w", pid, fpath, err) } @@ -130,7 +131,7 @@ func findBySupervisorUnits(units string) ([]processGroup, error) { "status": status, } - var procs []*process.Process + var procs []*gopsprocess.Process switch status { case "FATAL", "EXITED", "BACKOFF", "STOPPING": tags["error"] = strings.Join(kv[2:], " ") @@ -141,7 +142,7 @@ func findBySupervisorUnits(units string) ([]processGroup, error) { if err != nil { return nil, fmt.Errorf("failed to parse group PID %q: %w", rawpid, err) } - p, err := process.NewProcess(int32(grouppid)) + p, err := gopsprocess.NewProcess(int32(grouppid)) if err != nil { return nil, fmt.Errorf("failed to find process for PID %d of unit %q: %w", grouppid, name, err) } diff --git a/plugins/inputs/prometheus/consul.go b/plugins/inputs/prometheus/consul.go index 431e3231996e0..9020929f598ab 100644 --- a/plugins/inputs/prometheus/consul.go +++ b/plugins/inputs/prometheus/consul.go @@ -14,17 +14,17 @@ import ( 
"github.com/influxdata/telegraf/config" ) -type ConsulConfig struct { +type consulConfig struct { // Address of the Consul agent. The address must contain a hostname or an IP address // and optionally a port (format: "host:port"). Enabled bool `toml:"enabled"` Agent string `toml:"agent"` QueryInterval config.Duration `toml:"query_interval"` - Queries []*ConsulQuery `toml:"query"` + Queries []*consulQuery `toml:"query"` } // One Consul service discovery query -type ConsulQuery struct { +type consulQuery struct { // A name of the searched services (not ID) ServiceName string `toml:"name"` @@ -128,7 +128,7 @@ func (p *Prometheus) startConsul(ctx context.Context) error { } func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { - consulServiceURLs := make(map[string]URLAndAddress) + consulServiceURLs := make(map[string]urlAndAddress) p.Log.Debugf("Refreshing Consul services") @@ -165,8 +165,8 @@ func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { p.Log.Infof("Created scrape URLs from Consul for Service (%s, %s)", q.ServiceName, q.ServiceTag) } q.lastQueryFailed = false - p.Log.Debugf("Adding scrape URL from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, uaa.URL.String()) - consulServiceURLs[uaa.URL.String()] = *uaa + p.Log.Debugf("Adding scrape URL from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, uaa.url.String()) + consulServiceURLs[uaa.url.String()] = *uaa } } @@ -177,7 +177,7 @@ func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { return nil } -func (p *Prometheus) getConsulServiceURL(q *ConsulQuery, s *api.CatalogService) (*URLAndAddress, error) { +func (p *Prometheus) getConsulServiceURL(q *consulQuery, s *api.CatalogService) (*urlAndAddress, error) { var buffer bytes.Buffer buffer.Reset() err := q.serviceURLTemplate.Execute(&buffer, s) @@ -201,9 +201,9 @@ func (p *Prometheus) getConsulServiceURL(q *ConsulQuery, s *api.CatalogService) p.Log.Debugf("Will scrape metrics from Consul Service 
%s", serviceURL.String()) - return &URLAndAddress{ - URL: serviceURL, - OriginalURL: serviceURL, - Tags: extraTags, + return &urlAndAddress{ + url: serviceURL, + originalURL: serviceURL, + tags: extraTags, }, nil } diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index eefe5a215a8cf..2c4ef136c18ca 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -124,11 +124,11 @@ func shouldScrapePod(pod *corev1.Pod, p *Prometheus) bool { var shouldScrape bool switch p.MonitorKubernetesPodsMethod { - case MonitorMethodAnnotations: // must have 'true' annotation to be scraped + case monitorMethodAnnotations: // must have 'true' annotation to be scraped shouldScrape = pod.Annotations != nil && pod.Annotations["prometheus.io/scrape"] == "true" - case MonitorMethodSettings: // will be scraped regardless of annotation + case monitorMethodSettings: // will be scraped regardless of annotation shouldScrape = true - case MonitorMethodSettingsAndAnnotations: // will be scraped unless opts out with 'false' annotation + case monitorMethodSettingsAndAnnotations: // will be scraped unless opts out with 'false' annotation shouldScrape = pod.Annotations == nil || pod.Annotations["prometheus.io/scrape"] != "false" } @@ -194,7 +194,7 @@ func (p *Prometheus) watchPod(ctx context.Context, clientset *kubernetes.Clients if err != nil { p.Log.Errorf("getting key from cache %s", err.Error()) } - podID := PodID(key) + podID := podID(key) if shouldScrapePod(newPod, p) { // When Informers re-Lists, pod might already be registered, // do nothing if it is, register otherwise @@ -209,7 +209,7 @@ func (p *Prometheus) watchPod(ctx context.Context, clientset *kubernetes.Clients DeleteFunc: func(oldObj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(oldObj) if err == nil { - unregisterPod(PodID(key), p) + unregisterPod(podID(key), p) } }, }) @@ -280,7 +280,7 @@ func updateCadvisorPodList(p 
*Prometheus, req *http.Request) error { // Updating pod list to be latest cadvisor response p.lock.Lock() - p.kubernetesPods = make(map[PodID]URLAndAddress) + p.kubernetesPods = make(map[podID]urlAndAddress) // Register pod only if it has an annotation to scrape, if it is ready, // and if namespace and selectors are specified and match @@ -419,7 +419,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { tags[k] = v } } - podURL := p.AddressToURL(targetURL, targetURL.Hostname()) + podURL := p.addressToURL(targetURL, targetURL.Hostname()) // Locks earlier if using cAdvisor calls - makes a new list each time // rather than updating and removing from the same list @@ -427,12 +427,12 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { p.lock.Lock() defer p.lock.Unlock() } - p.kubernetesPods[PodID(pod.GetNamespace()+"/"+pod.GetName())] = URLAndAddress{ - URL: podURL, - Address: targetURL.Hostname(), - OriginalURL: targetURL, - Tags: tags, - Namespace: pod.GetNamespace(), + p.kubernetesPods[podID(pod.GetNamespace()+"/"+pod.GetName())] = urlAndAddress{ + url: podURL, + address: targetURL.Hostname(), + originalURL: targetURL, + tags: tags, + namespace: pod.GetNamespace(), } } @@ -446,15 +446,15 @@ func getScrapeURL(pod *corev1.Pod, p *Prometheus) (*url.URL, error) { var scheme, pathAndQuery, port string - if p.MonitorKubernetesPodsMethod == MonitorMethodSettings || - p.MonitorKubernetesPodsMethod == MonitorMethodSettingsAndAnnotations { + if p.MonitorKubernetesPodsMethod == monitorMethodSettings || + p.MonitorKubernetesPodsMethod == monitorMethodSettingsAndAnnotations { scheme = p.MonitorKubernetesPodsScheme pathAndQuery = p.MonitorKubernetesPodsPath port = strconv.Itoa(p.MonitorKubernetesPodsPort) } - if p.MonitorKubernetesPodsMethod == MonitorMethodAnnotations || - p.MonitorKubernetesPodsMethod == MonitorMethodSettingsAndAnnotations { + if p.MonitorKubernetesPodsMethod == monitorMethodAnnotations || + p.MonitorKubernetesPodsMethod == 
monitorMethodSettingsAndAnnotations { if ann := pod.Annotations["prometheus.io/scheme"]; ann != "" { scheme = ann } @@ -489,12 +489,12 @@ func getScrapeURL(pod *corev1.Pod, p *Prometheus) (*url.URL, error) { return base, nil } -func unregisterPod(podID PodID, p *Prometheus) { +func unregisterPod(podID podID, p *Prometheus) { p.lock.Lock() defer p.lock.Unlock() if v, ok := p.kubernetesPods[podID]; ok { p.Log.Debugf("registered a delete request for %s", podID) delete(p.kubernetesPods, podID) - p.Log.Debugf("will stop scraping for %q", v.URL.String()) + p.Log.Debugf("will stop scraping for %q", v.url.String()) } } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 98be067b395d1..5e2e2e3ca8cfb 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -18,8 +18,8 @@ func initPrometheus() *Prometheus { prom.MonitorKubernetesPodsScheme = "http" prom.MonitorKubernetesPodsPort = 9102 prom.MonitorKubernetesPodsPath = "/metrics" - prom.MonitorKubernetesPodsMethod = MonitorMethodAnnotations - prom.kubernetesPods = map[PodID]URLAndAddress{} + prom.MonitorKubernetesPodsMethod = monitorMethodAnnotations + prom.kubernetesPods = map[podID]urlAndAddress{} return prom } @@ -34,7 +34,7 @@ func TestScrapeURLNoAnnotations(t *testing.T) { func TestScrapeURLNoAnnotationsScrapeConfig(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations p := pod() p.Annotations = map[string]string{} @@ -45,7 +45,7 @@ func TestScrapeURLNoAnnotationsScrapeConfig(t *testing.T) { func TestScrapeURLScrapeConfigCustom(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations prom.MonitorKubernetesPodsScheme = "https" 
prom.MonitorKubernetesPodsPort = 9999 @@ -66,7 +66,7 @@ func TestScrapeURLAnnotations(t *testing.T) { func TestScrapeURLAnnotationsScrapeConfig(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations p := pod() url, err := getScrapeURL(p, prom) require.NoError(t, err) @@ -84,7 +84,7 @@ func TestScrapeURLAnnotationsCustomPort(t *testing.T) { func TestScrapeURLAnnotationsCustomPortScrapeConfig(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations p := pod() p.Annotations = map[string]string{"prometheus.io/port": "9000"} url, err := getScrapeURL(p, prom) @@ -129,7 +129,7 @@ func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) { } func TestAddPod(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -139,7 +139,7 @@ func TestAddPod(t *testing.T) { func TestAddPodScrapeConfig(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations p := pod() p.Annotations = map[string]string{} @@ -148,7 +148,7 @@ func TestAddPodScrapeConfig(t *testing.T) { } func TestAddMultipleDuplicatePods(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -156,13 +156,13 @@ func TestAddMultipleDuplicatePods(t *testing.T) { p.Name = "Pod2" registerPod(p, prom) - 
urls, err := prom.GetAllURLs() + urls, err := prom.getAllURLs() require.NoError(t, err) require.Len(t, urls, 1) } func TestAddMultiplePods(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -174,41 +174,41 @@ func TestAddMultiplePods(t *testing.T) { } func TestDeletePods(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - podID, err := cache.MetaNamespaceKeyFunc(p) + id, err := cache.MetaNamespaceKeyFunc(p) require.NoError(t, err) - unregisterPod(PodID(podID), prom) + unregisterPod(podID(id), prom) require.Empty(t, prom.kubernetesPods) } func TestKeepDefaultNamespaceLabelName(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - podID, err := cache.MetaNamespaceKeyFunc(p) + id, err := cache.MetaNamespaceKeyFunc(p) require.NoError(t, err) - tags := prom.kubernetesPods[PodID(podID)].Tags + tags := prom.kubernetesPods[podID(id)].tags require.Equal(t, "default", tags["namespace"]) } func TestChangeNamespaceLabelName(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, PodNamespaceLabelName: "pod_namespace", kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, PodNamespaceLabelName: "pod_namespace", kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, 
prom) - podID, err := cache.MetaNamespaceKeyFunc(p) + id, err := cache.MetaNamespaceKeyFunc(p) require.NoError(t, err) - tags := prom.kubernetesPods[PodID(podID)].Tags + tags := prom.kubernetesPods[podID(id)].tags require.Equal(t, "default", tags["pod_namespace"]) require.Equal(t, "", tags["namespace"]) } @@ -300,14 +300,14 @@ func TestAnnotationFilters(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} prom.PodAnnotationInclude = tc.include prom.PodAnnotationExclude = tc.exclude require.NoError(t, prom.initFilters()) registerPod(p, prom) for _, pd := range prom.kubernetesPods { for _, tagKey := range tc.expectedTags { - require.Contains(t, pd.Tags, tagKey) + require.Contains(t, pd.tags, tagKey) } } }) @@ -345,14 +345,14 @@ func TestLabelFilters(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} prom.PodLabelInclude = tc.include prom.PodLabelExclude = tc.exclude require.NoError(t, prom.initFilters()) registerPod(p, prom) for _, pd := range prom.kubernetesPods { for _, tagKey := range tc.expectedTags { - require.Contains(t, pd.Tags, tagKey) + require.Contains(t, pd.tags, tagKey) } } }) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 191d27dd29a58..8b557a9cab979 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -34,18 +34,14 @@ import ( //go:embed sample.conf var sampleConfig string -const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3` - -type MonitorMethod string 
- const ( - MonitorMethodNone MonitorMethod = "" - MonitorMethodAnnotations MonitorMethod = "annotations" - MonitorMethodSettings MonitorMethod = "settings" - MonitorMethodSettingsAndAnnotations MonitorMethod = "settings+annotations" -) + acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3` -type PodID string + monitorMethodNone monitorMethod = "" + monitorMethodAnnotations monitorMethod = "annotations" + monitorMethodSettings monitorMethod = "settings" + monitorMethodSettingsAndAnnotations monitorMethod = "settings+annotations" +) type Prometheus struct { URLs []string `toml:"urls"` @@ -72,7 +68,7 @@ type Prometheus struct { KubeConfig string `toml:"kube_config"` KubernetesLabelSelector string `toml:"kubernetes_label_selector"` KubernetesFieldSelector string `toml:"kubernetes_field_selector"` - MonitorKubernetesPodsMethod MonitorMethod `toml:"monitor_kubernetes_pods_method"` + MonitorKubernetesPodsMethod monitorMethod `toml:"monitor_kubernetes_pods_method"` MonitorKubernetesPodsScheme string `toml:"monitor_kubernetes_pods_scheme"` MonitorKubernetesPodsPath string `toml:"monitor_kubernetes_pods_path"` MonitorKubernetesPodsPort int `toml:"monitor_kubernetes_pods_port"` @@ -85,7 +81,7 @@ type Prometheus struct { CacheRefreshInterval int `toml:"cache_refresh_interval"` // Consul discovery - ConsulConfig ConsulConfig `toml:"consul"` + ConsulConfig consulConfig `toml:"consul"` Log telegraf.Logger `toml:"-"` common_http.HTTPClientConfig @@ -100,7 +96,7 @@ type Prometheus struct { // Should we scrape Kubernetes services for prometheus annotations lock sync.Mutex - kubernetesPods map[PodID]URLAndAddress + kubernetesPods map[podID]urlAndAddress cancel context.CancelFunc wg sync.WaitGroup @@ -114,9 +110,21 @@ type Prometheus struct { podLabelExcludeFilter filter.Filter // List of consul services to scrape - consulServices map[string]URLAndAddress + consulServices 
map[string]urlAndAddress } +type urlAndAddress struct { + originalURL *url.URL + url *url.URL + address string + tags map[string]string + namespace string +} + +type monitorMethod string + +type podID string + func (*Prometheus) SampleConfig() string { return sampleConfig } @@ -164,8 +172,8 @@ func (p *Prometheus) Init() error { p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.") } - if p.MonitorKubernetesPodsMethod == MonitorMethodNone { - p.MonitorKubernetesPodsMethod = MonitorMethodAnnotations + if p.MonitorKubernetesPodsMethod == monitorMethodNone { + p.MonitorKubernetesPodsMethod = monitorMethodAnnotations } // Parse label and field selectors - will be used to filter pods after cAdvisor call @@ -239,11 +247,65 @@ func (p *Prometheus) Init() error { "Accept": acceptHeader, } - p.kubernetesPods = make(map[PodID]URLAndAddress) + p.kubernetesPods = make(map[podID]urlAndAddress) return nil } +// Start will start the Kubernetes and/or Consul scraping if enabled in the configuration +func (p *Prometheus) Start(_ telegraf.Accumulator) error { + var ctx context.Context + p.wg = sync.WaitGroup{} + ctx, p.cancel = context.WithCancel(context.Background()) + + if p.ConsulConfig.Enabled && len(p.ConsulConfig.Queries) > 0 { + if err := p.startConsul(ctx); err != nil { + return err + } + } + if p.MonitorPods { + if err := p.startK8s(ctx); err != nil { + return err + } + } + return nil +} + +func (p *Prometheus) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + allURLs, err := p.getAllURLs() + if err != nil { + return err + } + for _, URL := range allURLs { + wg.Add(1) + go func(serviceURL urlAndAddress) { + defer wg.Done() + requestFields, tags, err := p.gatherURL(serviceURL, acc) + acc.AddError(err) + + // Add metrics + if p.EnableRequestMetrics { + acc.AddFields("prometheus_request", requestFields, tags) + } + }(URL) + } + + wg.Wait() + + return nil +} + +func (p *Prometheus) Stop() { + p.cancel() + p.wg.Wait() + + if 
p.client != nil { + p.client.CloseIdleConnections() + } +} + func (p *Prometheus) initFilters() error { if p.PodAnnotationExclude != nil { podAnnotationExclude, err := filter.Compile(p.PodAnnotationExclude) @@ -276,7 +338,7 @@ func (p *Prometheus) initFilters() error { return nil } -func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL { +func (p *Prometheus) addressToURL(u *url.URL, address string) *url.URL { host := address if u.Port() != "" { host = address + ":" + u.Port() @@ -295,23 +357,15 @@ func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL { return reconstructedURL } -type URLAndAddress struct { - OriginalURL *url.URL - URL *url.URL - Address string - Tags map[string]string - Namespace string -} - -func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { - allURLs := make(map[string]URLAndAddress, len(p.URLs)+len(p.consulServices)+len(p.kubernetesPods)) +func (p *Prometheus) getAllURLs() (map[string]urlAndAddress, error) { + allURLs := make(map[string]urlAndAddress, len(p.URLs)+len(p.consulServices)+len(p.kubernetesPods)) for _, u := range p.URLs { address, err := url.Parse(u) if err != nil { p.Log.Errorf("Could not parse %q, skipping it. 
Error: %s", u, err.Error()) continue } - allURLs[address.String()] = URLAndAddress{URL: address, OriginalURL: address} + allURLs[address.String()] = urlAndAddress{url: address, originalURL: address} } p.lock.Lock() @@ -322,8 +376,8 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { } // loop through all pods scraped via the prometheus annotation on the pods for _, v := range p.kubernetesPods { - if namespaceAnnotationMatch(v.Namespace, p) { - allURLs[v.URL.String()] = v + if namespaceAnnotationMatch(v.namespace, p) { + allURLs[v.url.String()] = v } } @@ -339,62 +393,34 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { continue } for _, resolved := range resolvedAddresses { - serviceURL := p.AddressToURL(address, resolved) - allURLs[serviceURL.String()] = URLAndAddress{ - URL: serviceURL, - Address: resolved, - OriginalURL: address, + serviceURL := p.addressToURL(address, resolved) + allURLs[serviceURL.String()] = urlAndAddress{ + url: serviceURL, + address: resolved, + originalURL: address, } } } return allURLs, nil } -// Reads stats from all configured servers accumulates stats. -// Returns one of the errors encountered while gather stats (if any). 
-func (p *Prometheus) Gather(acc telegraf.Accumulator) error { - var wg sync.WaitGroup - - allURLs, err := p.GetAllURLs() - if err != nil { - return err - } - for _, URL := range allURLs { - wg.Add(1) - go func(serviceURL URLAndAddress) { - defer wg.Done() - requestFields, tags, err := p.gatherURL(serviceURL, acc) - acc.AddError(err) - - // Add metrics - if p.EnableRequestMetrics { - acc.AddFields("prometheus_request", requestFields, tags) - } - }(URL) - } - - wg.Wait() - - return nil -} - -func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[string]interface{}, map[string]string, error) { +func (p *Prometheus) gatherURL(u urlAndAddress, acc telegraf.Accumulator) (map[string]interface{}, map[string]string, error) { var req *http.Request var uClient *http.Client requestFields := make(map[string]interface{}) - tags := make(map[string]string, len(u.Tags)+2) + tags := make(map[string]string, len(u.tags)+2) if p.URLTag != "" { - tags[p.URLTag] = u.OriginalURL.String() + tags[p.URLTag] = u.originalURL.String() } - if u.Address != "" { - tags["address"] = u.Address + if u.address != "" { + tags["address"] = u.address } - for k, v := range u.Tags { + for k, v := range u.tags { tags[k] = v } - if u.URL.Scheme == "unix" { - path := u.URL.Query().Get("path") + if u.url.Scheme == "unix" { + path := u.url.Query().Get("path") if path == "" { path = "/metrics" } @@ -413,19 +439,19 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s TLSClientConfig: tlsCfg, DisableKeepAlives: true, Dial: func(string, string) (net.Conn, error) { - c, err := net.Dial("unix", u.URL.Path) + c, err := net.Dial("unix", u.url.Path) return c, err }, }, } } else { - if u.URL.Path == "" { - u.URL.Path = "/metrics" + if u.url.Path == "" { + u.url.Path = "/metrics" } var err error - req, err = http.NewRequest("GET", u.URL.String(), nil) + req, err = http.NewRequest("GET", u.url.String(), nil) if err != nil { - return nil, nil, fmt.Errorf("unable to 
create new request %q: %w", u.URL.String(), err) + return nil, nil, fmt.Errorf("unable to create new request %q: %w", u.url.String(), err) } } @@ -469,7 +495,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s var err error var resp *http.Response var start time.Time - if u.URL.Scheme != "unix" { + if u.url.Scheme != "unix" { start = time.Now() //nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer` resp, err = p.client.Do(req) @@ -480,14 +506,14 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s } end := time.Since(start).Seconds() if err != nil { - return requestFields, tags, fmt.Errorf("error making HTTP request to %q: %w", u.URL, err) + return requestFields, tags, fmt.Errorf("error making HTTP request to %q: %w", u.url, err) } requestFields["response_time"] = end defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return requestFields, tags, fmt.Errorf("%q returned HTTP status %q", u.URL, resp.Status) + return requestFields, tags, fmt.Errorf("%q returned HTTP status %q", u.url, resp.Status) } var body []byte @@ -504,7 +530,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s return requestFields, tags, fmt.Errorf("error reading body: %w", err) } if int64(len(body)) > limit { - p.Log.Infof("skipping %s: content length exceeded maximum body size (%d)", u.URL, limit) + p.Log.Infof("skipping %s: content length exceeded maximum body size (%d)", u.url, limit) return requestFields, tags, nil } } else { @@ -539,20 +565,20 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s } metrics, err := metricParser.Parse(body) if err != nil { - return requestFields, tags, fmt.Errorf("error reading metrics for %q: %w", u.URL, err) + return requestFields, tags, fmt.Errorf("error reading metrics for %q: %w", u.url, err) } for _, metric := range metrics { tags := metric.Tags() // strip user and password from 
URL - u.OriginalURL.User = nil + u.originalURL.User = nil if p.URLTag != "" { - tags[p.URLTag] = u.OriginalURL.String() + tags[p.URLTag] = u.originalURL.String() } - if u.Address != "" { - tags["address"] = u.Address + if u.address != "" { + tags["address"] = u.address } - for k, v := range u.Tags { + for k, v := range u.tags { tags[k] = v } @@ -603,39 +629,11 @@ func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) { return true, "" } -// Start will start the Kubernetes and/or Consul scraping if enabled in the configuration -func (p *Prometheus) Start(_ telegraf.Accumulator) error { - var ctx context.Context - p.wg = sync.WaitGroup{} - ctx, p.cancel = context.WithCancel(context.Background()) - - if p.ConsulConfig.Enabled && len(p.ConsulConfig.Queries) > 0 { - if err := p.startConsul(ctx); err != nil { - return err - } - } - if p.MonitorPods { - if err := p.startK8s(ctx); err != nil { - return err - } - } - return nil -} - -func (p *Prometheus) Stop() { - p.cancel() - p.wg.Wait() - - if p.client != nil { - p.client.CloseIdleConnections() - } -} - func init() { inputs.Add("prometheus", func() telegraf.Input { return &Prometheus{ - kubernetesPods: make(map[PodID]URLAndAddress), - consulServices: make(map[string]URLAndAddress), + kubernetesPods: make(map[podID]urlAndAddress), + consulServices: make(map[string]urlAndAddress), URLTag: "url", } }) diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 995ec9c8dcb8c..cdb723da3d357 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -630,7 +630,7 @@ func TestInitConfigSelectors(t *testing.T) { URLs: nil, URLTag: "url", MonitorPods: true, - MonitorKubernetesPodsMethod: MonitorMethodSettings, + MonitorKubernetesPodsMethod: monitorMethodSettings, PodScrapeInterval: 60, KubernetesLabelSelector: "app=test", KubernetesFieldSelector: "spec.nodeName=node-0", diff --git 
a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 22729e5ce76c2..7d4cebf2b9a95 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -23,18 +23,6 @@ func (*Proxmox) SampleConfig() string { return sampleConfig } -func (px *Proxmox) Gather(acc telegraf.Accumulator) error { - err := getNodeSearchDomain(px) - if err != nil { - return err - } - - gatherLxcData(px, acc) - gatherQemuData(px, acc) - - return nil -} - func (px *Proxmox) Init() error { // Set hostname as default node name for backwards compatibility if px.NodeName == "" { @@ -57,12 +45,16 @@ func (px *Proxmox) Init() error { return nil } -func init() { - inputs.Add("proxmox", func() telegraf.Input { - return &Proxmox{ - requestFunction: performRequest, - } - }) +func (px *Proxmox) Gather(acc telegraf.Accumulator) error { + err := getNodeSearchDomain(px) + if err != nil { + return err + } + + gatherLxcData(px, acc) + gatherQemuData(px, acc) + + return nil } func getNodeSearchDomain(px *Proxmox) error { @@ -274,3 +266,11 @@ func getTags(px *Proxmox, name string, vmConfig vmConfig, rt resourceType) map[s "vm_type": string(rt), } } + +func init() { + inputs.Add("proxmox", func() telegraf.Input { + return &Proxmox{ + requestFunction: performRequest, + } + }) +} diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index 941af52fb8a2b..47b6856f61e86 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -10,28 +10,28 @@ import ( "github.com/influxdata/telegraf/plugins/common/tls" ) +var ( + qemu resourceType = "qemu" + lxc resourceType = "lxc" +) + type Proxmox struct { BaseURL string `toml:"base_url"` APIToken string `toml:"api_token"` ResponseTimeout config.Duration `toml:"response_timeout"` NodeName string `toml:"node_name"` - tls.ClientConfig - httpClient *http.Client - nodeSearchDomain string + Log telegraf.Logger `toml:"-"` - requestFunction func(px *Proxmox, apiUrl string, method 
string, data url.Values) ([]byte, error) - Log telegraf.Logger `toml:"-"` + httpClient *http.Client + + nodeSearchDomain string + requestFunction func(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) } type resourceType string -var ( - qemu resourceType = "qemu" - lxc resourceType = "lxc" -) - type vmStats struct { Data []vmStat `json:"data"` } diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index f4332858d9d29..d7cc5d882d877 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -17,12 +17,11 @@ import ( //go:embed sample.conf var sampleConfig string -// PuppetAgent is a PuppetAgent plugin type PuppetAgent struct { - Location string + Location string `toml:"location"` } -type State struct { +type state struct { Events event Resources resource Changes change @@ -101,7 +100,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return err } - var puppetState State + var puppetState state err = yaml.Unmarshal(fh, &puppetState) if err != nil { @@ -114,7 +113,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return nil } -func structPrinter(s *State, acc telegraf.Accumulator, tags map[string]string) { +func structPrinter(s *state, acc telegraf.Accumulator, tags map[string]string) { e := reflect.ValueOf(s).Elem() fields := make(map[string]interface{}) From 1c3abfed7546d59e63494f16b0061006f0b40acf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 20:58:11 +0100 Subject: [PATCH 095/170] chore(deps): Bump github.com/Azure/go-autorest/autorest/adal from 0.9.23 to 0.9.24 (#16315) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 584286c7008ff..a55322b9d062e 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20230531184854-c06a8eff66fe github.com/Azure/go-autorest/autorest v0.11.29 - github.com/Azure/go-autorest/autorest/adal v0.9.23 + github.com/Azure/go-autorest/autorest/adal v0.9.24 github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 github.com/BurntSushi/toml v1.4.0 github.com/ClickHouse/clickhouse-go v1.5.4 diff --git a/go.sum b/go.sum index c11b686ddd6af..9a8cdfa42f411 100644 --- a/go.sum +++ b/go.sum @@ -689,8 +689,8 @@ github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/ github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= From e766c86a0ff89e8a088d0d1b798c8cedf484f66e Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 18 Dec 2024 15:48:03 +0100 Subject: [PATCH 096/170] chore(deps): Bump github.com/vapourismo/knx-go from v0.0.0-20240217175130-922a0d50c241 to 
v0.0.0-20240915133544-a6ab43471c11 (#16324) --- go.mod | 2 +- go.sum | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a55322b9d062e..fd1daea38d548 100644 --- a/go.mod +++ b/go.mod @@ -195,7 +195,7 @@ require ( github.com/tidwall/wal v1.1.7 github.com/tinylib/msgp v1.2.0 github.com/urfave/cli/v2 v2.27.2 - github.com/vapourismo/knx-go v0.0.0-20240217175130-922a0d50c241 + github.com/vapourismo/knx-go v0.0.0-20240915133544-a6ab43471c11 github.com/vishvananda/netlink v1.3.0 github.com/vishvananda/netns v0.0.5 github.com/vjeantet/grok v1.0.1 diff --git a/go.sum b/go.sum index 9a8cdfa42f411..b982de44cc6b4 100644 --- a/go.sum +++ b/go.sum @@ -2371,8 +2371,8 @@ github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI= github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/vapourismo/knx-go v0.0.0-20240217175130-922a0d50c241 h1:3r4OPQ/jPYQA0C7i149kevHLGSG4JZtrQv2986fXSCo= -github.com/vapourismo/knx-go v0.0.0-20240217175130-922a0d50c241/go.mod h1:aGkV5xHz9sBkAckp2hez7khfehKp4YvyBwAmVdVEulg= +github.com/vapourismo/knx-go v0.0.0-20240915133544-a6ab43471c11 h1:YzrpNqpAuAgUQ0vseiI3mAVz7zr0rM5LWdaGCCr6Ipc= +github.com/vapourismo/knx-go v0.0.0-20240915133544-a6ab43471c11/go.mod h1:+iC7aAxEwuJ4mvdKaY0zCGT0dpIC/AtHt4yv2jr5FOo= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= @@ -2705,6 +2705,7 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net 
v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= From d829a5b29cbb06f53c7aa92d11d803a237d87c16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 18 Dec 2024 19:47:47 +0100 Subject: [PATCH 097/170] chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[l-r]` (#16325) --- plugins/inputs/lanz/lanz.go | 2 +- plugins/inputs/leofs/leofs.go | 6 +-- plugins/inputs/libvirt/libvirt.go | 2 +- .../inputs/libvirt/libvirt_metric_format.go | 40 +++++++++---------- plugins/inputs/linux_cpu/linux_cpu.go | 2 +- plugins/inputs/logstash/logstash.go | 25 +++++------- plugins/inputs/lvm/lvm.go | 4 -- plugins/inputs/mcrouter/mcrouter.go | 8 ++-- plugins/inputs/mcrouter/mcrouter_test.go | 8 +--- plugins/inputs/modbus/configuration_metric.go | 6 +-- .../inputs/modbus/configuration_register.go | 18 ++++----- .../inputs/modbus/configuration_request.go | 10 ++--- plugins/inputs/modbus/modbus.go | 10 ++--- plugins/inputs/monit/monit_test.go | 4 +- .../mqtt_consumer/mqtt_consumer_test.go | 22 +++++----- plugins/inputs/mysql/mysql.go | 30 +++++++------- plugins/inputs/nats_consumer/nats_consumer.go | 2 +- plugins/inputs/neptune_apex/neptune_apex.go | 4 +- .../inputs/neptune_apex/neptune_apex_test.go | 6 +-- plugins/inputs/netflow/netflow.go | 2 +- plugins/inputs/netflow/netflow_v5.go | 4 +- .../nginx_upstream_check.go | 4 +- plugins/inputs/nsq_consumer/nsq_consumer.go | 2 +- plugins/inputs/nvidia_smi/nvidia_smi.go | 2 +- plugins/inputs/opentelemetry/writer.go | 2 +- plugins/inputs/pf/pf.go | 4 +- 
plugins/inputs/phpfpm/fcgi_test.go | 6 +-- plugins/inputs/phpfpm/phpfpm_test.go | 2 +- .../inputs/powerdns/powerdns_linux_test.go | 7 +--- .../powerdns_recursor/powerdns_recursor.go | 2 +- .../inputs/powerdns_recursor/protocol_v3.go | 2 +- plugins/inputs/procstat/native_finder.go | 16 ++++---- plugins/inputs/procstat/pgrep.go | 2 +- plugins/inputs/procstat/procstat.go | 4 +- plugins/inputs/procstat/procstat_test.go | 4 +- plugins/inputs/prometheus/kubernetes.go | 2 +- plugins/inputs/prometheus/prometheus.go | 4 +- plugins/inputs/radius/radius.go | 2 +- plugins/inputs/raindrops/raindrops.go | 4 +- plugins/inputs/raindrops/raindrops_test.go | 3 +- plugins/inputs/redis/redis.go | 6 +-- plugins/inputs/redis/redis_test.go | 8 ++-- plugins/inputs/rethinkdb/rethinkdb.go | 6 +-- .../riemann_listener/riemann_listener.go | 2 +- 44 files changed, 145 insertions(+), 166 deletions(-) diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index bb2040d317cc9..a9589bb701777 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -58,7 +58,7 @@ func (l *Lanz) Start(acc telegraf.Accumulator) error { return nil } -func (l *Lanz) Gather(_ telegraf.Accumulator) error { +func (*Lanz) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index 3845384a19179..19e0878b2f608 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -159,7 +159,7 @@ func (*LeoFS) SampleConfig() string { func (l *LeoFS) Gather(acc telegraf.Accumulator) error { if len(l.Servers) == 0 { - return l.gatherServer(defaultEndpoint, serverTypeManagerMaster, acc) + return gatherServer(defaultEndpoint, serverTypeManagerMaster, acc) } var wg sync.WaitGroup for _, endpoint := range l.Servers { @@ -185,14 +185,14 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(endpoint string, st serverType) { defer wg.Done() - acc.AddError(l.gatherServer(endpoint, st, acc)) + 
acc.AddError(gatherServer(endpoint, st, acc)) }(endpoint, st) } wg.Wait() return nil } -func (l *LeoFS) gatherServer(endpoint string, serverType serverType, acc telegraf.Accumulator) error { +func gatherServer(endpoint string, serverType serverType, acc telegraf.Accumulator) error { cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", "-On", endpoint, oid) stdout, err := cmd.StdoutPipe() if err != nil { diff --git a/plugins/inputs/libvirt/libvirt.go b/plugins/inputs/libvirt/libvirt.go index 4a32eaf761773..a1fe4363f205b 100644 --- a/plugins/inputs/libvirt/libvirt.go +++ b/plugins/inputs/libvirt/libvirt.go @@ -47,7 +47,7 @@ type Libvirt struct { domainsMap map[string]struct{} } -func (l *Libvirt) SampleConfig() string { +func (*Libvirt) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/libvirt/libvirt_metric_format.go b/plugins/inputs/libvirt/libvirt_metric_format.go index 91946ed2cc66e..c87ebe1c880dd 100644 --- a/plugins/inputs/libvirt/libvirt_metric_format.go +++ b/plugins/inputs/libvirt/libvirt_metric_format.go @@ -17,31 +17,31 @@ var ( ) func (l *Libvirt) addMetrics(stats []golibvirt.DomainStatsRecord, vcpuInfos map[string][]vcpuAffinity, acc telegraf.Accumulator) { - domainsMetrics := l.translateMetrics(stats) + domainsMetrics := translateMetrics(stats) for domainName, metrics := range domainsMetrics { for metricType, values := range metrics { switch metricType { case "state": - l.addStateMetrics(values, domainName, acc) + addStateMetrics(values, domainName, acc) case "cpu": - l.addCPUMetrics(values, domainName, acc) + addCPUMetrics(values, domainName, acc) case "balloon": - l.addBalloonMetrics(values, domainName, acc) + addBalloonMetrics(values, domainName, acc) case "vcpu": l.addVcpuMetrics(values, domainName, vcpuInfos[domainName], acc) case "net": - l.addInterfaceMetrics(values, domainName, acc) + addInterfaceMetrics(values, domainName, acc) case "perf": - l.addPerfMetrics(values, domainName, acc) + addPerfMetrics(values, domainName, 
acc) case "block": - l.addBlockMetrics(values, domainName, acc) + addBlockMetrics(values, domainName, acc) case "iothread": - l.addIothreadMetrics(values, domainName, acc) + addIothreadMetrics(values, domainName, acc) case "memory": - l.addMemoryMetrics(values, domainName, acc) + addMemoryMetrics(values, domainName, acc) case "dirtyrate": - l.addDirtyrateMetrics(values, domainName, acc) + addDirtyrateMetrics(values, domainName, acc) } } } @@ -61,7 +61,7 @@ func (l *Libvirt) addMetrics(stats []golibvirt.DomainStatsRecord, vcpuInfos map[ } } -func (l *Libvirt) translateMetrics(stats []golibvirt.DomainStatsRecord) map[string]map[string]map[string]golibvirt.TypedParamValue { +func translateMetrics(stats []golibvirt.DomainStatsRecord) map[string]map[string]map[string]golibvirt.TypedParamValue { metrics := make(map[string]map[string]map[string]golibvirt.TypedParamValue) for _, stat := range stats { if stat.Params != nil { @@ -83,7 +83,7 @@ func (l *Libvirt) translateMetrics(stats []golibvirt.DomainStatsRecord) map[stri return metrics } -func (l *Libvirt) addStateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addStateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var stateFields = make(map[string]interface{}) var stateTags = map[string]string{ "domain_name": domainName, @@ -101,7 +101,7 @@ func (l *Libvirt) addStateMetrics(metrics map[string]golibvirt.TypedParamValue, } } -func (l *Libvirt) addCPUMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addCPUMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var cpuFields = make(map[string]interface{}) var cpuCacheMonitorTotalFields = make(map[string]interface{}) @@ -188,7 +188,7 @@ func (l *Libvirt) addCPUMetrics(metrics map[string]golibvirt.TypedParamValue, do } } -func (l *Libvirt) addBalloonMetrics(metrics 
map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addBalloonMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var balloonFields = make(map[string]interface{}) var balloonTags = map[string]string{ "domain_name": domainName, @@ -283,7 +283,7 @@ func (l *Libvirt) getCurrentPCPUForVCPU(vcpuID string, vcpuInfos []vcpuAffinity) return -1 } -func (l *Libvirt) addInterfaceMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addInterfaceMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var netTotalFields = make(map[string]interface{}) var netData = make(map[string]map[string]interface{}) @@ -330,7 +330,7 @@ func (l *Libvirt) addInterfaceMetrics(metrics map[string]golibvirt.TypedParamVal } } -func (l *Libvirt) addPerfMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addPerfMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var perfFields = make(map[string]interface{}) var perfTags = map[string]string{ "domain_name": domainName, @@ -351,7 +351,7 @@ func (l *Libvirt) addPerfMetrics(metrics map[string]golibvirt.TypedParamValue, d } } -func (l *Libvirt) addBlockMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addBlockMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var blockTotalFields = make(map[string]interface{}) var blockData = make(map[string]map[string]interface{}) @@ -399,7 +399,7 @@ func (l *Libvirt) addBlockMetrics(metrics map[string]golibvirt.TypedParamValue, } } -func (l *Libvirt) addIothreadMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addIothreadMetrics(metrics map[string]golibvirt.TypedParamValue, 
domainName string, acc telegraf.Accumulator) { var iothreadTotalFields = make(map[string]interface{}) var iothreadData = make(map[string]map[string]interface{}) @@ -446,7 +446,7 @@ func (l *Libvirt) addIothreadMetrics(metrics map[string]golibvirt.TypedParamValu } } -func (l *Libvirt) addMemoryMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addMemoryMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var memoryBandwidthMonitorTotalFields = make(map[string]interface{}) var memoryBandwidthMonitorData = make(map[string]map[string]interface{}) @@ -528,7 +528,7 @@ func (l *Libvirt) addMemoryMetrics(metrics map[string]golibvirt.TypedParamValue, } } -func (l *Libvirt) addDirtyrateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addDirtyrateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var dirtyrateFields = make(map[string]interface{}) var dirtyrateVcpuData = make(map[string]map[string]interface{}) diff --git a/plugins/inputs/linux_cpu/linux_cpu.go b/plugins/inputs/linux_cpu/linux_cpu.go index e7839d4e172c7..1457184c9da06 100644 --- a/plugins/inputs/linux_cpu/linux_cpu.go +++ b/plugins/inputs/linux_cpu/linux_cpu.go @@ -47,7 +47,7 @@ type prop struct { optional bool } -func (g *LinuxCPU) SampleConfig() string { +func (*LinuxCPU) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index da65773c46f39..4fe48035e7a5c 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -283,12 +283,7 @@ func (logstash *Logstash) gatherProcessStats(address string, accumulator telegra } // gatherPluginsStats go through a list of plugins and add their metrics to the accumulator -func (logstash *Logstash) gatherPluginsStats( - plugins []plugin, - pluginType 
string, - tags map[string]string, - accumulator telegraf.Accumulator, -) error { +func gatherPluginsStats(plugins []plugin, pluginType string, tags map[string]string, accumulator telegraf.Accumulator) error { for _, plugin := range plugins { pluginTags := map[string]string{ "plugin_name": plugin.Name, @@ -370,7 +365,7 @@ func (logstash *Logstash) gatherPluginsStats( return nil } -func (logstash *Logstash) gatherQueueStats(queue pipelineQueue, tags map[string]string, acc telegraf.Accumulator) error { +func gatherQueueStats(queue pipelineQueue, tags map[string]string, acc telegraf.Accumulator) error { queueTags := map[string]string{ "queue_type": queue.Type, } @@ -438,20 +433,20 @@ func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegr } accumulator.AddFields("logstash_events", flattener.Fields, tags) - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator) + err = gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator) if err != nil { return err } - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator) + err = gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator) if err != nil { return err } - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator) + err = gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator) if err != nil { return err } - err = logstash.gatherQueueStats(pipelineStats.Pipeline.Queue, tags, accumulator) + err = gatherQueueStats(pipelineStats.Pipeline.Queue, tags, accumulator) if err != nil { return err } @@ -484,20 +479,20 @@ func (logstash *Logstash) gatherPipelinesStats(address string, accumulator teleg } accumulator.AddFields("logstash_events", flattener.Fields, tags) - err = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator) + err = 
gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator) if err != nil { return err } - err = logstash.gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator) + err = gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator) if err != nil { return err } - err = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator) + err = gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator) if err != nil { return err } - err = logstash.gatherQueueStats(pipeline.Queue, tags, accumulator) + err = gatherQueueStats(pipeline.Queue, tags, accumulator) if err != nil { return err } diff --git a/plugins/inputs/lvm/lvm.go b/plugins/inputs/lvm/lvm.go index 0efb7270b9d5d..e1c246dddf7f9 100644 --- a/plugins/inputs/lvm/lvm.go +++ b/plugins/inputs/lvm/lvm.go @@ -33,10 +33,6 @@ func (*LVM) SampleConfig() string { return sampleConfig } -func (lvm *LVM) Init() error { - return nil -} - func (lvm *LVM) Gather(acc telegraf.Accumulator) error { if err := lvm.gatherPhysicalVolumes(acc); err != nil { return err diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index 37202fa300db0..0cf91573a88f8 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -128,14 +128,14 @@ func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { } for _, serverAddress := range m.Servers { - acc.AddError(m.gatherServer(ctx, serverAddress, acc)) + acc.AddError(gatherServer(ctx, serverAddress, acc)) } return nil } // parseAddress parses an address string into 'host:port' and 'protocol' parts -func (m *Mcrouter) parseAddress(address string) (parsedAddress, protocol string, err error) { +func parseAddress(address string) (parsedAddress, protocol string, err error) { var host string var port string @@ -181,13 +181,13 @@ func (m *Mcrouter) parseAddress(address string) (parsedAddress, protocol string, return parsedAddress, protocol, nil } -func (m *Mcrouter) 
gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { +func gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { var conn net.Conn var err error var protocol string var dialer net.Dialer - address, protocol, err = m.parseAddress(address) + address, protocol, err = parseAddress(address) if err != nil { return err } diff --git a/plugins/inputs/mcrouter/mcrouter_test.go b/plugins/inputs/mcrouter/mcrouter_test.go index 47f658d256afa..a0d1414ff7d0b 100644 --- a/plugins/inputs/mcrouter/mcrouter_test.go +++ b/plugins/inputs/mcrouter/mcrouter_test.go @@ -15,10 +15,6 @@ import ( ) func TestAddressParsing(t *testing.T) { - m := &Mcrouter{ - Servers: []string{"tcp://" + testutil.GetLocalHost()}, - } - var acceptTests = [][3]string{ {"tcp://localhost:8086", "localhost:8086", "tcp"}, {"tcp://localhost", "localhost:" + defaultServerURL.Port(), "tcp"}, @@ -32,7 +28,7 @@ func TestAddressParsing(t *testing.T) { } for _, args := range acceptTests { - address, protocol, err := m.parseAddress(args[0]) + address, protocol, err := parseAddress(args[0]) require.NoError(t, err, args[0]) require.Equal(t, args[1], address, args[0]) @@ -40,7 +36,7 @@ func TestAddressParsing(t *testing.T) { } for _, addr := range rejectTests { - address, protocol, err := m.parseAddress(addr) + address, protocol, err := parseAddress(addr) require.Error(t, err, addr) require.Empty(t, address, addr) diff --git a/plugins/inputs/modbus/configuration_metric.go b/plugins/inputs/modbus/configuration_metric.go index c0301728e0e39..959690a1eb89f 100644 --- a/plugins/inputs/modbus/configuration_metric.go +++ b/plugins/inputs/modbus/configuration_metric.go @@ -42,7 +42,7 @@ type configurationPerMetric struct { logger telegraf.Logger } -func (c *configurationPerMetric) sampleConfigPart() string { +func (*configurationPerMetric) sampleConfigPart() string { return sampleConfigPartPerMetric } @@ -366,7 +366,7 @@ func (c *configurationPerMetric) fieldID(seed 
maphash.Seed, def metricDefinition return mh.Sum64() } -func (c *configurationPerMetric) determineOutputDatatype(input string) (string, error) { +func (*configurationPerMetric) determineOutputDatatype(input string) (string, error) { // Handle our special types switch input { case "INT8L", "INT8H", "INT16", "INT32", "INT64": @@ -381,7 +381,7 @@ func (c *configurationPerMetric) determineOutputDatatype(input string) (string, return "unknown", fmt.Errorf("invalid input datatype %q for determining output", input) } -func (c *configurationPerMetric) determineFieldLength(input string, length uint16) (uint16, error) { +func (*configurationPerMetric) determineFieldLength(input string, length uint16) (uint16, error) { // Handle our special types switch input { case "BIT", "INT8L", "INT8H", "UINT8L", "UINT8H": diff --git a/plugins/inputs/modbus/configuration_register.go b/plugins/inputs/modbus/configuration_register.go index 9bd70caca6caa..9d47af5553298 100644 --- a/plugins/inputs/modbus/configuration_register.go +++ b/plugins/inputs/modbus/configuration_register.go @@ -31,7 +31,7 @@ type configurationOriginal struct { logger telegraf.Logger } -func (c *configurationOriginal) sampleConfigPart() string { +func (*configurationOriginal) sampleConfigPart() string { return sampleConfigPartPerRegister } @@ -43,19 +43,19 @@ func (c *configurationOriginal) check() error { return fmt.Errorf("invalid 'string_register_location' %q", c.workarounds.StringRegisterLocation) } - if err := c.validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { + if err := validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { return err } - if err := c.validateFieldDefinitions(c.Coils, cCoils); err != nil { + if err := validateFieldDefinitions(c.Coils, cCoils); err != nil { return err } - if err := c.validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { + if err := validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { 
return err } - return c.validateFieldDefinitions(c.InputRegisters, cInputRegisters) + return validateFieldDefinitions(c.InputRegisters, cInputRegisters) } func (c *configurationOriginal) process() (map[byte]requestSet, error) { @@ -182,7 +182,7 @@ func (c *configurationOriginal) newFieldFromDefinition(def fieldDefinition, type return f, nil } -func (c *configurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefinition, registerType string) error { +func validateFieldDefinitions(fieldDefs []fieldDefinition, registerType string) error { nameEncountered := make(map[string]bool, len(fieldDefs)) for _, item := range fieldDefs { // check empty name @@ -276,7 +276,7 @@ func (c *configurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini return nil } -func (c *configurationOriginal) normalizeInputDatatype(dataType string, words int) (string, error) { +func (*configurationOriginal) normalizeInputDatatype(dataType string, words int) (string, error) { if dataType == "FLOAT32" { config.PrintOptionValueDeprecationNotice("input.modbus", "data_type", "FLOAT32", telegraf.DeprecationInfo{ Since: "1.16.0", @@ -323,7 +323,7 @@ func (c *configurationOriginal) normalizeInputDatatype(dataType string, words in return normalizeInputDatatype(dataType) } -func (c *configurationOriginal) normalizeOutputDatatype(dataType string) (string, error) { +func (*configurationOriginal) normalizeOutputDatatype(dataType string) (string, error) { // Handle our special types switch dataType { case "FIXED", "FLOAT32", "UFIXED": @@ -332,7 +332,7 @@ func (c *configurationOriginal) normalizeOutputDatatype(dataType string) (string return normalizeOutputDatatype("native") } -func (c *configurationOriginal) normalizeByteOrder(byteOrder string) (string, error) { +func (*configurationOriginal) normalizeByteOrder(byteOrder string) (string, error) { // Handle our special types switch byteOrder { case "AB", "ABCDEFGH": diff --git a/plugins/inputs/modbus/configuration_request.go 
b/plugins/inputs/modbus/configuration_request.go index 6288b0c1b5f99..13cfc36c3d710 100644 --- a/plugins/inputs/modbus/configuration_request.go +++ b/plugins/inputs/modbus/configuration_request.go @@ -45,7 +45,7 @@ type configurationPerRequest struct { logger telegraf.Logger } -func (c *configurationPerRequest) sampleConfigPart() string { +func (*configurationPerRequest) sampleConfigPart() string { return sampleConfigPartPerRequest } @@ -300,7 +300,7 @@ func (c *configurationPerRequest) newFieldFromDefinition(def requestFieldDefinit fieldLength := uint16(1) if typed { - if fieldLength, err = c.determineFieldLength(def.InputType, def.Length); err != nil { + if fieldLength, err = determineFieldLength(def.InputType, def.Length); err != nil { return field{}, err } } @@ -338,7 +338,7 @@ func (c *configurationPerRequest) newFieldFromDefinition(def requestFieldDefinit // For non-scaling cases we should choose the output corresponding to the input class // i.e. INT64 for INT*, UINT64 for UINT* etc. 
var err error - if def.OutputType, err = c.determineOutputDatatype(def.InputType); err != nil { + if def.OutputType, err = determineOutputDatatype(def.InputType); err != nil { return field{}, err } } else { @@ -406,7 +406,7 @@ func (c *configurationPerRequest) fieldID(seed maphash.Seed, def requestDefiniti return mh.Sum64() } -func (c *configurationPerRequest) determineOutputDatatype(input string) (string, error) { +func determineOutputDatatype(input string) (string, error) { // Handle our special types switch input { case "INT8L", "INT8H", "INT16", "INT32", "INT64": @@ -421,7 +421,7 @@ func (c *configurationPerRequest) determineOutputDatatype(input string) (string, return "unknown", fmt.Errorf("invalid input datatype %q for determining output", input) } -func (c *configurationPerRequest) determineFieldLength(input string, length uint16) (uint16, error) { +func determineFieldLength(input string, length uint16) (uint16, error) { // Handle our special types switch input { case "BIT", "INT8L", "INT8H", "UINT8L", "UINT8H": diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 0d95d3987ced6..eeb6577a8c5ae 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -251,22 +251,22 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { if !m.ExcludeRegisterTypeTag { tags["type"] = cCoils } - m.collectFields(grouper, timestamp, tags, requests.coil) + collectFields(grouper, timestamp, tags, requests.coil) if !m.ExcludeRegisterTypeTag { tags["type"] = cDiscreteInputs } - m.collectFields(grouper, timestamp, tags, requests.discrete) + collectFields(grouper, timestamp, tags, requests.discrete) if !m.ExcludeRegisterTypeTag { tags["type"] = cHoldingRegisters } - m.collectFields(grouper, timestamp, tags, requests.holding) + collectFields(grouper, timestamp, tags, requests.holding) if !m.ExcludeRegisterTypeTag { tags["type"] = cInputRegisters } - m.collectFields(grouper, timestamp, tags, requests.input) + 
collectFields(grouper, timestamp, tags, requests.input) // Add the metrics grouped by series to the accumulator for _, x := range grouper.Metrics() { @@ -532,7 +532,7 @@ func (m *Modbus) gatherRequestsInput(requests []request) error { return nil } -func (m *Modbus) collectFields(grouper *metric.SeriesGrouper, timestamp time.Time, tags map[string]string, requests []request) { +func collectFields(grouper *metric.SeriesGrouper, timestamp time.Time, tags map[string]string, requests []request) { for _, request := range requests { for _, field := range request.fields { // Collect tags from global and per-request diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index cf4d79ce693ba..e83e51643cfd3 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -17,8 +17,8 @@ import ( type transportMock struct { } -func (t *transportMock) RoundTrip(_ *http.Request) (*http.Response, error) { - errorString := "Get http://127.0.0.1:2812/_status?format=xml: " + +func (*transportMock) RoundTrip(*http.Request) (*http.Response, error) { + errorString := "get http://127.0.0.1:2812/_status?format=xml: " + "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + "read: connection reset by peer" return nil, errors.New(errorString) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index a1ec7dd272eb1..32f5b7e9f1da5 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -64,15 +64,15 @@ type fakeParser struct{} // fakeParser satisfies telegraf.Parser var _ telegraf.Parser = &fakeParser{} -func (p *fakeParser) Parse(_ []byte) ([]telegraf.Metric, error) { +func (*fakeParser) Parse([]byte) ([]telegraf.Metric, error) { panic("not implemented") } -func (p *fakeParser) ParseLine(_ string) (telegraf.Metric, error) { +func (*fakeParser) ParseLine(string) (telegraf.Metric, error) { panic("not implemented") 
} -func (p *fakeParser) SetDefaultTags(_ map[string]string) { +func (*fakeParser) SetDefaultTags(map[string]string) { panic("not implemented") } @@ -84,15 +84,15 @@ type fakeToken struct { // fakeToken satisfies mqtt.Token var _ mqtt.Token = &fakeToken{} -func (t *fakeToken) Wait() bool { +func (*fakeToken) Wait() bool { return true } -func (t *fakeToken) WaitTimeout(time.Duration) bool { +func (*fakeToken) WaitTimeout(time.Duration) bool { return true } -func (t *fakeToken) Error() error { +func (*fakeToken) Error() error { return nil } @@ -166,7 +166,7 @@ type message struct { qos byte } -func (m *message) Duplicate() bool { +func (*message) Duplicate() bool { panic("not implemented") } @@ -174,7 +174,7 @@ func (m *message) Qos() byte { return m.qos } -func (m *message) Retained() bool { +func (*message) Retained() bool { panic("not implemented") } @@ -182,15 +182,15 @@ func (m *message) Topic() string { return m.topic } -func (m *message) MessageID() uint16 { +func (*message) MessageID() uint16 { panic("not implemented") } -func (m *message) Payload() []byte { +func (*message) Payload() []byte { return []byte("cpu time_idle=42i") } -func (m *message) Ack() { +func (*message) Ack() { panic("not implemented") } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index fb37dfba571cf..174b2ea3c10a8 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -461,7 +461,7 @@ func (m *Mysql) gatherServer(server *config.Secret, acc telegraf.Accumulator) er } if m.GatherBinaryLogs { - err = m.gatherBinaryLogs(db, servtag, acc) + err = gatherBinaryLogs(db, servtag, acc) if err != nil { return err } @@ -510,35 +510,35 @@ func (m *Mysql) gatherServer(server *config.Secret, acc telegraf.Accumulator) er } if m.GatherTableIOWaits { - err = m.gatherPerfTableIOWaits(db, servtag, acc) + err = gatherPerfTableIOWaits(db, servtag, acc) if err != nil { return err } } if m.GatherIndexIOWaits { - err = m.gatherPerfIndexIOWaits(db, servtag, 
acc) + err = gatherPerfIndexIOWaits(db, servtag, acc) if err != nil { return err } } if m.GatherTableLockWaits { - err = m.gatherPerfTableLockWaits(db, servtag, acc) + err = gatherPerfTableLockWaits(db, servtag, acc) if err != nil { return err } } if m.GatherEventWaits { - err = m.gatherPerfEventWaits(db, servtag, acc) + err = gatherPerfEventWaits(db, servtag, acc) if err != nil { return err } } if m.GatherFileEventsStats { - err = m.gatherPerfFileEventsStatuses(db, servtag, acc) + err = gatherPerfFileEventsStatuses(db, servtag, acc) if err != nil { return err } @@ -712,7 +712,7 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, servtag string, acc telegraf.Acc // gatherBinaryLogs can be used to collect size and count of all binary files // binlogs metric requires the MySQL server to turn it on in configuration -func (m *Mysql) gatherBinaryLogs(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +func gatherBinaryLogs(db *sql.DB, servtag string, acc telegraf.Accumulator) error { // run query rows, err := db.Query(binaryLogsQuery) if err != nil { @@ -1174,9 +1174,8 @@ func getColSlice(rows *sql.Rows) ([]interface{}, error) { return nil, fmt.Errorf("not Supported - %d columns", l) } -// gatherPerfTableIOWaits can be used to get total count and time -// of I/O wait event for each table and process -func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +// gatherPerfTableIOWaits can be used to get total count and time of I/O wait event for each table and process +func gatherPerfTableIOWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { rows, err := db.Query(perfTableIOWaitsQuery) if err != nil { return err @@ -1221,9 +1220,8 @@ func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, servtag string, acc telegraf. 
return nil } -// gatherPerfIndexIOWaits can be used to get total count and time -// of I/O wait event for each index and process -func (m *Mysql) gatherPerfIndexIOWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +// gatherPerfIndexIOWaits can be used to get total count and time of I/O wait event for each index and process +func gatherPerfIndexIOWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { rows, err := db.Query(perfIndexIOWaitsQuery) if err != nil { return err @@ -1500,7 +1498,7 @@ func (m *Mysql) gatherPerfSummaryPerAccountPerEvent(db *sql.DB, servtag string, // the total number and time for SQL and external lock wait events // for each table and operation // requires the MySQL server to be enabled to save this metric -func (m *Mysql) gatherPerfTableLockWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +func gatherPerfTableLockWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { // check if table exists, // if performance_schema is not enabled, tables do not exist // then there is no need to scan them @@ -1627,7 +1625,7 @@ func (m *Mysql) gatherPerfTableLockWaits(db *sql.DB, servtag string, acc telegra } // gatherPerfEventWaits can be used to get total time and number of event waits -func (m *Mysql) gatherPerfEventWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +func gatherPerfEventWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { rows, err := db.Query(perfEventWaitsQuery) if err != nil { return err @@ -1658,7 +1656,7 @@ func (m *Mysql) gatherPerfEventWaits(db *sql.DB, servtag string, acc telegraf.Ac } // gatherPerfFileEvents can be used to get stats on file events -func (m *Mysql) gatherPerfFileEventsStatuses(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +func gatherPerfFileEventsStatuses(db *sql.DB, servtag string, acc telegraf.Accumulator) error { rows, err := db.Query(perfFileEventsQuery) if err != nil { return err diff --git 
a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 7904800499d89..43531cc53e912 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -186,7 +186,7 @@ func (n *NatsConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (n *NatsConsumer) Gather(_ telegraf.Accumulator) error { +func (*NatsConsumer) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index d5485959177c7..97e02652419bf 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -83,12 +83,12 @@ func (n *NeptuneApex) gatherServer( if err != nil { return err } - return n.parseXML(acc, resp) + return parseXML(acc, resp) } // parseXML is strict on the input and does not do best-effort parsing. // This is because of the life-support nature of the Neptune Apex. 
-func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error { +func parseXML(acc telegraf.Accumulator, data []byte) error { r := xmlReply{} err := xml.Unmarshal(data, &r) if err != nil { diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index a64374cd22bde..a6f65ec96ec81 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -57,9 +57,7 @@ func TestGather(t *testing.T) { } func TestParseXML(t *testing.T) { - n := &NeptuneApex{} - goodTime := time.Date(2018, 12, 22, 21, 55, 37, 0, - time.FixedZone("PST", 3600*-8)) + goodTime := time.Date(2018, 12, 22, 21, 55, 37, 0, time.FixedZone("PST", 3600*-8)) tests := []struct { name string xmlResponse []byte @@ -363,7 +361,7 @@ func TestParseXML(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { var acc testutil.Accumulator - err := n.parseXML(&acc, test.xmlResponse) + err := parseXML(&acc, test.xmlResponse) if test.wantErr { require.Error(t, err, "expected error but got ") return diff --git a/plugins/inputs/netflow/netflow.go b/plugins/inputs/netflow/netflow.go index 218d9b5296dba..162a6d82db6a2 100644 --- a/plugins/inputs/netflow/netflow.go +++ b/plugins/inputs/netflow/netflow.go @@ -114,7 +114,7 @@ func (n *NetFlow) Start(acc telegraf.Accumulator) error { return nil } -func (n *NetFlow) Gather(_ telegraf.Accumulator) error { +func (*NetFlow) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/netflow/netflow_v5.go b/plugins/inputs/netflow/netflow_v5.go index 839a1d0943598..dadf3df0f2c80 100644 --- a/plugins/inputs/netflow/netflow_v5.go +++ b/plugins/inputs/netflow/netflow_v5.go @@ -15,14 +15,14 @@ import ( // Decoder structure type netflowv5Decoder struct{} -func (d *netflowv5Decoder) init() error { +func (*netflowv5Decoder) init() error { if err := initL4ProtoMapping(); err != nil { return fmt.Errorf("initializing layer 4 
protocol mapping failed: %w", err) } return nil } -func (d *netflowv5Decoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { +func (*netflowv5Decoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { src := srcIP.String() // Decode the message diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index c1d02e5cae9f9..68edaa0cacd32 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -160,7 +160,7 @@ func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator te fields := map[string]interface{}{ "status": server.Status, - "status_code": check.getStatusCode(server.Status), + "status_code": getStatusCode(server.Status), "rise": server.Rise, "fall": server.Fall, } @@ -171,7 +171,7 @@ func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator te return nil } -func (check *NginxUpstreamCheck) getStatusCode(status string) uint8 { +func getStatusCode(status string) uint8 { switch status { case "up": return 1 diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 69f2a0aea73a1..1516e4f2a1417 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -138,7 +138,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { return nil } -func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { +func (*NSQConsumer) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 695b8c6f601ee..e4714b0ff37f8 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -51,7 +51,7 @@ func (smi *NvidiaSMI) Start(telegraf.Accumulator) error { return nil } -func (smi *NvidiaSMI) Stop() {} +func (*NvidiaSMI) 
Stop() {} // Gather implements the telegraf interface func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/opentelemetry/writer.go b/plugins/inputs/opentelemetry/writer.go index b7701678edc17..6af00b6eb4af1 100644 --- a/plugins/inputs/opentelemetry/writer.go +++ b/plugins/inputs/opentelemetry/writer.go @@ -49,6 +49,6 @@ func (w *writeToAccumulator) EnqueuePoint( return nil } -func (w *writeToAccumulator) WriteBatch(_ context.Context) error { +func (*writeToAccumulator) WriteBatch(context.Context) error { return nil } diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index 20709aaf750d9..1e7eb4a63aab7 100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -104,7 +104,7 @@ func (pf *PF) Gather(acc telegraf.Accumulator) error { return nil } - if perr := pf.parsePfctlOutput(o, acc); perr != nil { + if perr := parsePfctlOutput(o, acc); perr != nil { acc.AddError(perr) } return nil @@ -114,7 +114,7 @@ func errMissingData(tag string) error { return fmt.Errorf("struct data for tag %q not found in %s output", tag, pfctlCommand) } -func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error { +func parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error { fields := make(map[string]interface{}) scanner := bufio.NewScanner(strings.NewReader(pfoutput)) for scanner.Scan() { diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index d039685bb05f8..73f14cb776af9 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -72,7 +72,7 @@ type nilCloser struct { io.ReadWriter } -func (c *nilCloser) Close() error { return nil } +func (*nilCloser) Close() error { return nil } func TestStreams(t *testing.T) { var rec record @@ -125,11 +125,11 @@ func (c *writeOnlyConn) Write(p []byte) (int, error) { return len(p), nil } -func (c *writeOnlyConn) Read(_ []byte) (int, error) { +func (*writeOnlyConn) Read([]byte) (int, error) { 
return 0, errors.New("conn is write-only") } -func (c *writeOnlyConn) Close() error { +func (*writeOnlyConn) Close() error { return nil } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 802c761532ccc..d267b57ca2f28 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -31,7 +31,7 @@ import ( type statServer struct{} // We create a fake server to return test data -func (s statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) { +func (statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Length", strconv.Itoa(len(outputSample))) fmt.Fprint(w, outputSample) diff --git a/plugins/inputs/powerdns/powerdns_linux_test.go b/plugins/inputs/powerdns/powerdns_linux_test.go index 772bee4c4d46d..5bb576759a9f4 100644 --- a/plugins/inputs/powerdns/powerdns_linux_test.go +++ b/plugins/inputs/powerdns/powerdns_linux_test.go @@ -13,9 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -type statServer struct{} - -func (s statServer) serverSocket(l net.Listener) { +func serverSocket(l net.Listener) { for { conn, err := l.Accept() if err != nil { @@ -46,8 +44,7 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) { defer socket.Close() - s := statServer{} - go s.serverSocket(socket) + go serverSocket(socket) p := &Powerdns{ UnixSockets: []string{sockname}, diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index 48a77518f5a6a..3fd8e19c55a6c 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -53,7 +53,7 @@ func (p *PowerdnsRecursor) Init() error { case 2: p.gatherFromServer = p.gatherFromV2Server case 3: - p.gatherFromServer = p.gatherFromV3Server + p.gatherFromServer = gatherFromV3Server default: return fmt.Errorf("unknown control protocol version '%d', allowed 
values are 1, 2, 3", p.ControlProtocolVersion) } diff --git a/plugins/inputs/powerdns_recursor/protocol_v3.go b/plugins/inputs/powerdns_recursor/protocol_v3.go index b6e04e5ea58bb..9dbc9bd776fe4 100644 --- a/plugins/inputs/powerdns_recursor/protocol_v3.go +++ b/plugins/inputs/powerdns_recursor/protocol_v3.go @@ -16,7 +16,7 @@ import ( // status: uint32 // dataLength: size_t // data: byte[dataLength] -func (p *PowerdnsRecursor) gatherFromV3Server(address string, acc telegraf.Accumulator) error { +func gatherFromV3Server(address string, acc telegraf.Accumulator) error { conn, err := net.Dial("unix", address) if err != nil { return err diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 192a431acd503..976cc79636ef7 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -14,7 +14,7 @@ import ( type NativeFinder struct{} // Uid will return all pids for the given user -func (pg *NativeFinder) uid(user string) ([]pid, error) { +func (*NativeFinder) uid(user string) ([]pid, error) { var dst []pid procs, err := gopsprocess.Processes() if err != nil { @@ -34,7 +34,7 @@ func (pg *NativeFinder) uid(user string) ([]pid, error) { } // PidFile returns the pid from the pid file given. 
-func (pg *NativeFinder) pidFile(path string) ([]pid, error) { +func (*NativeFinder) pidFile(path string) ([]pid, error) { var pids []pid pidString, err := os.ReadFile(path) if err != nil { @@ -49,13 +49,13 @@ func (pg *NativeFinder) pidFile(path string) ([]pid, error) { } // FullPattern matches on the command line when the process was executed -func (pg *NativeFinder) fullPattern(pattern string) ([]pid, error) { +func (*NativeFinder) fullPattern(pattern string) ([]pid, error) { var pids []pid regxPattern, err := regexp.Compile(pattern) if err != nil { return pids, err } - procs, err := pg.fastProcessList() + procs, err := fastProcessList() if err != nil { return pids, err } @@ -73,7 +73,7 @@ func (pg *NativeFinder) fullPattern(pattern string) ([]pid, error) { } // Children matches children pids on the command line when the process was executed -func (pg *NativeFinder) children(processID pid) ([]pid, error) { +func (*NativeFinder) children(processID pid) ([]pid, error) { // Get all running processes p, err := gopsprocess.NewProcess(int32(processID)) if err != nil { @@ -93,7 +93,7 @@ func (pg *NativeFinder) children(processID pid) ([]pid, error) { return pids, err } -func (pg *NativeFinder) fastProcessList() ([]*gopsprocess.Process, error) { +func fastProcessList() ([]*gopsprocess.Process, error) { pids, err := gopsprocess.Pids() if err != nil { return nil, err @@ -107,13 +107,13 @@ func (pg *NativeFinder) fastProcessList() ([]*gopsprocess.Process, error) { } // Pattern matches on the process name -func (pg *NativeFinder) pattern(pattern string) ([]pid, error) { +func (*NativeFinder) pattern(pattern string) ([]pid, error) { var pids []pid regxPattern, err := regexp.Compile(pattern) if err != nil { return pids, err } - procs, err := pg.fastProcessList() + procs, err := fastProcessList() if err != nil { return pids, err } diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index add3a2dfb120d..8e61fff4449e6 100644 --- 
a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -23,7 +23,7 @@ func newPgrepFinder() (pidFinder, error) { return &pgrep{path}, nil } -func (pg *pgrep) pidFile(path string) ([]pid, error) { +func (*pgrep) pidFile(path string) ([]pid, error) { var pids []pid pidString, err := os.ReadFile(path) if err != nil { diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 6bf1e8402dc69..ecc8a978105be 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -617,7 +617,7 @@ func (p *Procstat) cgroupPIDs() ([]pidsTags, error) { pidTags := make([]pidsTags, 0, len(items)) for _, item := range items { - pids, err := p.singleCgroupPIDs(item) + pids, err := singleCgroupPIDs(item) if err != nil { return nil, err } @@ -628,7 +628,7 @@ func (p *Procstat) cgroupPIDs() ([]pidsTags, error) { return pidTags, nil } -func (p *Procstat) singleCgroupPIDs(path string) ([]pid, error) { +func singleCgroupPIDs(path string) ([]pid, error) { ok, err := isDir(path) if err != nil { return nil, err diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 4256f08e24234..85282ffb5df46 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -126,7 +126,7 @@ func (p *testProc) pid() pid { return p.procID } -func (p *testProc) Name() (string, error) { +func (*testProc) Name() (string, error) { return "test_proc", nil } @@ -134,7 +134,7 @@ func (p *testProc) setTag(k, v string) { p.tags[k] = v } -func (p *testProc) MemoryMaps(bool) (*[]gopsprocess.MemoryMapsStat, error) { +func (*testProc) MemoryMaps(bool) (*[]gopsprocess.MemoryMapsStat, error) { stats := make([]gopsprocess.MemoryMapsStat, 0) return &stats, nil } diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 2c4ef136c18ca..f1b303ecdb977 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ 
b/plugins/inputs/prometheus/kubernetes.go @@ -419,7 +419,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { tags[k] = v } } - podURL := p.addressToURL(targetURL, targetURL.Hostname()) + podURL := addressToURL(targetURL, targetURL.Hostname()) // Locks earlier if using cAdvisor calls - makes a new list each time // rather than updating and removing from the same list diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 8b557a9cab979..85d2de1f41cba 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -338,7 +338,7 @@ func (p *Prometheus) initFilters() error { return nil } -func (p *Prometheus) addressToURL(u *url.URL, address string) *url.URL { +func addressToURL(u *url.URL, address string) *url.URL { host := address if u.Port() != "" { host = address + ":" + u.Port() @@ -393,7 +393,7 @@ func (p *Prometheus) getAllURLs() (map[string]urlAndAddress, error) { continue } for _, resolved := range resolvedAddresses { - serviceURL := p.addressToURL(address, resolved) + serviceURL := addressToURL(address, resolved) allURLs[serviceURL.String()] = urlAndAddress{ url: serviceURL, address: resolved, diff --git a/plugins/inputs/radius/radius.go b/plugins/inputs/radius/radius.go index 984f31d93c2a9..efb71a1df9f2e 100644 --- a/plugins/inputs/radius/radius.go +++ b/plugins/inputs/radius/radius.go @@ -32,7 +32,7 @@ type Radius struct { //go:embed sample.conf var sampleConfig string -func (r *Radius) SampleConfig() string { +func (*Radius) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 762d2af810ef3..de2c5a82bc458 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -89,7 +89,7 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { if err != nil { return err } - tags := r.getTags(addr) + tags := getTags(addr) fields := 
map[string]interface{}{ "calling": calling, "writing": writing, @@ -153,7 +153,7 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } // Get tag(s) for the raindrops calling/writing plugin -func (r *Raindrops) getTags(addr *url.URL) map[string]string { +func getTags(addr *url.URL) map[string]string { h := addr.Host host, port, err := net.SplitHostPort(h) if err != nil { diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index 82def94f1484e..ac3c8692e96bb 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -35,11 +35,10 @@ writing: 200 // Verify that raindrops tags are properly parsed based on the server func TestRaindropsTags(t *testing.T) { urls := []string{"http://localhost/_raindrops", "http://localhost:80/_raindrops"} - r := &Raindrops{} for _, url1 := range urls { addr, err := url.Parse(url1) require.NoError(t, err) - tagMap := r.getTags(addr) + tagMap := getTags(addr) require.Contains(t, tagMap["server"], "localhost") } } diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 4f31f6dda18b5..e0a19f3f18760 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -315,7 +315,7 @@ func (r *Redis) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(client Client) { defer wg.Done() - acc.AddError(r.gatherServer(client, acc)) + acc.AddError(gatherServer(client, acc)) acc.AddError(r.gatherCommandValues(client, acc)) }(client) } @@ -344,7 +344,7 @@ func (r *Redis) gatherCommandValues(client Client, acc telegraf.Accumulator) err return nil } -func (r *Redis) gatherServer(client Client, acc telegraf.Accumulator) error { +func gatherServer(client Client, acc telegraf.Accumulator) error { info, err := client.Info().Result() if err != nil { return err @@ -774,7 +774,7 @@ func coerceType(value interface{}, typ reflect.Type) reflect.Value { return reflect.ValueOf(value) } -func (r 
*Redis) Start(telegraf.Accumulator) error { +func (*Redis) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index 0e96c49c358fe..f8f0d5b540f4d 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -17,19 +17,19 @@ import ( type testClient struct{} -func (t *testClient) BaseTags() map[string]string { +func (*testClient) BaseTags() map[string]string { return map[string]string{"host": "redis.net"} } -func (t *testClient) Info() *redis.StringCmd { +func (*testClient) Info() *redis.StringCmd { return nil } -func (t *testClient) Do(_ string, _ ...interface{}) (interface{}, error) { +func (*testClient) Do(string, ...interface{}) (interface{}, error) { return 2, nil } -func (t *testClient) Close() error { +func (*testClient) Close() error { return nil } diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index 79c42f583b1c2..2daf19312b4a2 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -30,7 +30,7 @@ func (*RethinkDB) SampleConfig() string { // Returns one of the errors encountered while gather stats (if any). 
func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { if len(r.Servers) == 0 { - return r.gatherServer(localhost, acc) + return gatherServer(localhost, acc) } var wg sync.WaitGroup @@ -47,7 +47,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func() { defer wg.Done() - acc.AddError(r.gatherServer(&Server{URL: u}, acc)) + acc.AddError(gatherServer(&Server{URL: u}, acc)) }() } @@ -56,7 +56,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { return nil } -func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error { +func gatherServer(server *Server, acc telegraf.Accumulator) error { var err error connectOpts := gorethink.ConnectOpts{ Address: server.URL.Host, diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 526b7fda67fd9..e269f1bea8417 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -275,7 +275,7 @@ func (*RiemannSocketListener) SampleConfig() string { return sampleConfig } -func (rsl *RiemannSocketListener) Gather(_ telegraf.Accumulator) error { +func (*RiemannSocketListener) Gather(telegraf.Accumulator) error { return nil } From 2e5e45905a5c96534a0301cfdf93d59bd6221356 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:13:01 +0100 Subject: [PATCH 098/170] chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.42.2 to 1.43.1 (#16198) --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 3ff4ad5fe2563..f9215ddb02c2f 100644 --- a/go.mod +++ b/go.mod @@ -45,18 +45,18 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/awnumar/memguard v0.22.5 - 
github.com/aws/aws-sdk-go-v2 v1.32.4 + github.com/aws/aws-sdk-go-v2 v1.32.5 github.com/aws/aws-sdk-go-v2/config v1.27.39 github.com/aws/aws-sdk-go-v2/credentials v1.17.44 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.43.1 github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.36.2 github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1 github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4 - github.com/aws/smithy-go v1.22.0 + github.com/aws/smithy-go v1.22.1 github.com/benbjohnson/clock v1.3.5 github.com/blues/jsonata-go v1.5.4 github.com/bmatcuk/doublestar/v3 v3.0.0 @@ -281,8 +281,8 @@ require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1 // indirect diff --git a/go.sum b/go.sum index f8e65aa44ba19..80ead3124a04f 100644 --- a/go.sum +++ b/go.sum @@ -860,8 +860,8 @@ github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAP github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= 
github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.32.4 h1:S13INUiTxgrPueTmrm5DZ+MiAo99zYzHEFh1UNkOxNE= -github.com/aws/aws-sdk-go-v2 v1.32.4/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2 v1.32.5 h1:U8vdWJuY7ruAkzaOdD7guwJjD06YSKmnKCJs7s3IkIo= +github.com/aws/aws-sdk-go-v2 v1.32.5/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= @@ -883,19 +883,19 @@ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayx github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g= github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 h1:A2w6m6Tmr+BNXjDsr7M90zkWjsu4JXHwrzPg235STs4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23/go.mod h1:35EVp9wyeANdujZruvHiQUAo9E3vbhnIO1mTCAxMlY0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 h1:4usbeaes3yJnCFC7kfeyhkdkPtoRYPa/hTmCqMpKpLI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24/go.mod h1:5CI1JemjVwde8m2WG3cz23qHKPOxbpkq0HaoreEgLIY= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 h1:pgYW9FCabt2M25MoHYCfMrVY2ghiiBKYWUVXfwZs+sU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23/go.mod 
h1:c48kLgzO19wAu3CPkDWC28JbaJ+hfQlsdl7I2+oqIbk= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 h1:N1zsICrQglfzaBnrfM0Ys00860C+QFwu6u/5+LomP+o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24/go.mod h1:dCn9HbJ8+K31i8IQ8EWmWj0EiIk0+vKiHNMxTTYveAg= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2 h1:eMh+iBTF1CbpHMfiRvIaVm+rzrH1DOzuSFaR55O+bBo= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2/go.mod h1:/A4zNqF1+RS5RV+NNLKIzUX1KtK5SoWgf/OpiqrwmBo= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.43.1 h1:FbjhJTRoTujDYDwTnnE46Km5Qh1mMSH+BwTL4ODFifg= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.43.1/go.mod h1:OwyCzHw6CH8pkLqT8uoCkOgUsgm11LTfexLZyRy6fBg= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0 h1:nawnkdqwinpBukRuDd+h0eURWHk67W4OInSJrD4NJsE= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0/go.mod h1:K27H8p8ZmsntKSSC8det8LuT5WahXoJ4vZqlWwKTRaM= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= @@ -942,8 +942,8 @@ github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.13.5/go.mod 
h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= -github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= From 9b336e5ec698b0023a223303b4fcf50a984263e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:15:18 +0100 Subject: [PATCH 099/170] chore(deps): Bump github.com/rclone/rclone from 1.68.1 to 1.68.2 (#16200) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f9215ddb02c2f..e8c4b330de7bd 100644 --- a/go.mod +++ b/go.mod @@ -170,7 +170,7 @@ require ( github.com/prometheus/procfs v0.15.1 github.com/prometheus/prometheus v0.54.1 github.com/rabbitmq/amqp091-go v1.10.0 - github.com/rclone/rclone v1.68.1 + github.com/rclone/rclone v1.68.2 github.com/redis/go-redis/v9 v9.6.1 github.com/riemann/riemann-go-client v0.5.1-0.20211206220514-f58f10cdce16 github.com/robbiet480/go.nut v0.0.0-20220219091450-bd8f121e1fa1 diff --git a/go.sum b/go.sum index 80ead3124a04f..eb3f421717cdf 100644 --- a/go.sum +++ b/go.sum @@ -2137,8 +2137,8 @@ github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzX github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU= github.com/rabbitmq/amqp091-go v1.10.0 
h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= -github.com/rclone/rclone v1.68.1 h1:vlEOAuPv4gGxWECM0NIaCwBNUt3ZQY7mCsyBtZjY+68= -github.com/rclone/rclone v1.68.1/go.mod h1:T8XKOt/2Fb9INROUtFH9eF9q9o9rI1W2qTrW2bw2cYU= +github.com/rclone/rclone v1.68.2 h1:0m2tKzfTnoZRhRseRFO3CsLa5ZCXYz3xWb98ke3dz98= +github.com/rclone/rclone v1.68.2/go.mod h1:DuhVHaYIVgIdtIg8vEVt/IBwyqPJUaarr/+nG8Zg+Fg= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= From e22f7eb669440bccd633b08c49775df51c47c866 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:16:12 +0100 Subject: [PATCH 100/170] chore(deps): Bump github.com/intel/powertelemetry from 1.0.1 to 1.0.2 (#16201) Co-authored-by: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- plugins/inputs/intel_powerstat/README.md | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e8c4b330de7bd..48ce14c31d203 100644 --- a/go.mod +++ b/go.mod @@ -121,7 +121,7 @@ require ( github.com/influxdata/tail v1.0.1-0.20241014115250-3e0015cb677a github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/intel/iaevents v1.1.0 - github.com/intel/powertelemetry v1.0.1 + github.com/intel/powertelemetry v1.0.2 github.com/jackc/pgconn v1.14.3 github.com/jackc/pgio v1.0.0 github.com/jackc/pgtype v1.14.4 diff --git a/go.sum b/go.sum index eb3f421717cdf..14f35e15826f0 100644 --- a/go.sum +++ b/go.sum @@ -1598,8 +1598,8 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9s github.com/influxdata/toml 
v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= github.com/intel/iaevents v1.1.0 h1:FzxMBfXk/apG2EUXUCfaq3gUQ+q+TgZ1HNMjjUILUGE= github.com/intel/iaevents v1.1.0/go.mod h1:CyUUzXw0lHRCsmyyF7Pwco9Y7NiTNQUUlcJ7RJAazKs= -github.com/intel/powertelemetry v1.0.1 h1:a35pZbqOnJlEYGEPXM+YKtetu6D6dJD4Jb4GS4Zetxs= -github.com/intel/powertelemetry v1.0.1/go.mod h1:f6pibcqhQyzN7FRwIXB4mAureaYZfJ+K8Gpm3y1gcrM= +github.com/intel/powertelemetry v1.0.2 h1:092xOflYu+YXzY3c/fQ2DpK1ePy9q9ulbm5yiNYrVkc= +github.com/intel/powertelemetry v1.0.2/go.mod h1:+PHKI9RElL7J1sTjgg3DGxtscD+IiLNmUzV1MOSCZt4= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md index ed4b6897b9420..b851df4cdeac5 100644 --- a/plugins/inputs/intel_powerstat/README.md +++ b/plugins/inputs/intel_powerstat/README.md @@ -452,6 +452,7 @@ powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c0_substate_c0_wa | 0x8F | Intel Sapphire Rapids X | ✓ | | | ✓ | | 0xCF | Intel Emerald Rapids X | ✓ | | | ✓ | | 0xAD | Intel Granite Rapids X | ✓ | | | | +| 0xAE | Intel Granite Rapids D | ✓ | | | | | 0x8A | Intel Lakefield | ✓ | | ✓ | | | 0x97 | Intel AlderLake | ✓ | | ✓ | ✓ | | 0x9A | Intel AlderLake-L | ✓ | | ✓ | ✓ | From 85f821672e1e4aba212ed447a2597934123334f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:17:11 +0100 Subject: [PATCH 101/170] chore(deps): Bump super-linter/super-linter from 7.1.0 to 7.2.0 (#16203) --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 
9512af781e471..9fd8d494f046a 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -54,7 +54,7 @@ jobs: # Run Linter against code base # ################################ - name: Lint Code Base - uses: super-linter/super-linter@v7.1.0 + uses: super-linter/super-linter@v7.2.0 env: VALIDATE_ALL_CODEBASE: false DEFAULT_BRANCH: master From 372b08e4ebcfc721f556d5322af15f5ab1473a54 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:17:40 +0100 Subject: [PATCH 102/170] chore(deps): Bump modernc.org/sqlite from 1.33.1 to 1.34.1 (#16202) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 48ce14c31d203..9c0ae6d3c9fb5 100644 --- a/go.mod +++ b/go.mod @@ -235,7 +235,7 @@ require ( k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.30.1 layeh.com/radius v0.0.0-20221205141417-e7fbddd11d68 - modernc.org/sqlite v1.33.1 + modernc.org/sqlite v1.34.1 ) require ( diff --git a/go.sum b/go.sum index 14f35e15826f0..af13981e637d2 100644 --- a/go.sum +++ b/go.sum @@ -3435,8 +3435,8 @@ modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/sqlite v1.33.1 h1:trb6Z3YYoeM9eDL1O8do81kP+0ejv+YzgyFo+Gwy0nM= -modernc.org/sqlite v1.33.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= +modernc.org/sqlite v1.34.1 h1:u3Yi6M0N8t9yKRDwhXcyp1eS5/ErhPTBggxWFuR6Hfk= +modernc.org/sqlite v1.34.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/strutil v1.2.0 
h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= From 3397b614a03431afb0e4d37eed3c1fb58ba21bd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 25 Nov 2024 11:23:17 +0100 Subject: [PATCH 103/170] chore: Fix linter findings for `revive:exported` in `plugins/inputs/n*` (#16205) --- plugins/inputs/nats/nats.go | 4 +- plugins/inputs/nats_consumer/nats_consumer.go | 85 ++-- .../nats_consumer/nats_consumer_test.go | 28 +- plugins/inputs/neoom_beaam/neoom_beaam.go | 12 +- plugins/inputs/neptune_apex/neptune_apex.go | 14 +- .../inputs/neptune_apex/neptune_apex_test.go | 4 +- plugins/inputs/net/net.go | 20 +- plugins/inputs/net/net_test.go | 6 +- plugins/inputs/net_response/net_response.go | 216 +++++---- .../inputs/net_response/net_response_test.go | 10 +- plugins/inputs/netflow/netflow.go | 30 +- plugins/inputs/netflow/netflow_decoder.go | 34 +- plugins/inputs/netflow/netflow_v5.go | 4 +- plugins/inputs/netflow/sflow_v5.go | 18 +- plugins/inputs/netstat/netstat.go | 12 +- plugins/inputs/netstat/netstat_test.go | 2 +- plugins/inputs/nfsclient/nfsclient.go | 410 +++++++++--------- plugins/inputs/nginx/nginx.go | 4 +- plugins/inputs/nginx_plus/nginx_plus.go | 4 +- .../inputs/nginx_plus_api/nginx_plus_api.go | 18 +- plugins/inputs/nginx_sts/nginx_sts.go | 14 +- .../nginx_upstream_check.go | 94 ++-- .../nginx_upstream_check_test.go | 4 +- plugins/inputs/nginx_vts/nginx_vts.go | 18 +- plugins/inputs/nomad/nomad.go | 25 +- plugins/inputs/nomad/nomad_metrics.go | 1 + plugins/inputs/nsd/nsd.go | 88 ++-- plugins/inputs/nsd/nsd_test.go | 4 +- plugins/inputs/nsq/nsq.go | 28 +- plugins/inputs/nsq/nsq_test.go | 4 +- plugins/inputs/nsq_consumer/nsq_consumer.go | 70 ++- plugins/inputs/nstat/nstat.go | 34 +- plugins/inputs/ntpq/ntpq.go | 74 ++-- plugins/inputs/nvidia_smi/common/setters.go | 2 + .../inputs/nvidia_smi/schema_v11/parser.go | 1 + .../inputs/nvidia_smi/schema_v12/parser.go | 1 + 36 files changed, 689 insertions(+), 708 deletions(-) diff --git 
a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index ec0e5f6767dd4..43bd4fb30199c 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -23,8 +23,8 @@ import ( var sampleConfig string type Nats struct { - Server string - ResponseTimeout config.Duration + Server string `toml:"server"` + ResponseTimeout config.Duration `toml:"response_timeout"` client *http.Client } diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index fc20cb945d710..7904800499d89 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -19,27 +19,12 @@ import ( //go:embed sample.conf var sampleConfig string -var once sync.Once - var ( + once sync.Once defaultMaxUndeliveredMessages = 1000 ) -type empty struct{} -type semaphore chan empty - -type natsError struct { - conn *nats.Conn - sub *nats.Subscription - err error -} - -func (e natsError) Error() string { - return fmt.Sprintf("%s url:%s id:%s sub:%s queue:%s", - e.err.Error(), e.conn.ConnectedUrl(), e.conn.ConnectedServerId(), e.sub.Subject, e.sub.Queue) -} - -type natsConsumer struct { +type NatsConsumer struct { QueueGroup string `toml:"queue_group"` Subjects []string `toml:"subjects"` Servers []string `toml:"servers"` @@ -70,24 +55,32 @@ type natsConsumer struct { cancel context.CancelFunc } -func (*natsConsumer) SampleConfig() string { - return sampleConfig +type ( + empty struct{} + semaphore chan empty +) + +type natsError struct { + conn *nats.Conn + sub *nats.Subscription + err error } -func (n *natsConsumer) SetParser(parser telegraf.Parser) { - n.parser = parser +func (e natsError) Error() string { + return fmt.Sprintf("%s url:%s id:%s sub:%s queue:%s", + e.err.Error(), e.conn.ConnectedUrl(), e.conn.ConnectedServerId(), e.sub.Subject, e.sub.Queue) } -func (n *natsConsumer) natsErrHandler(c *nats.Conn, s *nats.Subscription, e error) { - select { - case n.errs <- natsError{conn: c, sub: 
s, err: e}: - default: - return - } +func (*NatsConsumer) SampleConfig() string { + return sampleConfig } -// Start the nats consumer. Caller must call *natsConsumer.Stop() to clean up. -func (n *natsConsumer) Start(acc telegraf.Accumulator) error { +func (n *NatsConsumer) SetParser(parser telegraf.Parser) { + n.parser = parser +} + +// Start the nats consumer. Caller must call *NatsConsumer.Stop() to clean up. +func (n *NatsConsumer) Start(acc telegraf.Accumulator) error { n.acc = acc.WithTracking(n.MaxUndeliveredMessages) options := []nats.Option{ @@ -193,9 +186,27 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { return nil } +func (n *NatsConsumer) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (n *NatsConsumer) Stop() { + n.cancel() + n.wg.Wait() + n.clean() +} + +func (n *NatsConsumer) natsErrHandler(c *nats.Conn, s *nats.Subscription, e error) { + select { + case n.errs <- natsError{conn: c, sub: s, err: e}: + default: + return + } +} + // receiver() reads all incoming messages from NATS, and parses them into // telegraf metrics. 
-func (n *natsConsumer) receiver(ctx context.Context) { +func (n *NatsConsumer) receiver(ctx context.Context) { sem := make(semaphore, n.MaxUndeliveredMessages) for { @@ -237,7 +248,7 @@ func (n *natsConsumer) receiver(ctx context.Context) { } } -func (n *natsConsumer) clean() { +func (n *NatsConsumer) clean() { for _, sub := range n.subs { if err := sub.Unsubscribe(); err != nil { n.Log.Errorf("Error unsubscribing from subject %s in queue %s: %s", @@ -257,19 +268,9 @@ func (n *natsConsumer) clean() { } } -func (n *natsConsumer) Stop() { - n.cancel() - n.wg.Wait() - n.clean() -} - -func (n *natsConsumer) Gather(_ telegraf.Accumulator) error { - return nil -} - func init() { inputs.Add("nats_consumer", func() telegraf.Input { - return &natsConsumer{ + return &NatsConsumer{ Servers: []string{"nats://localhost:4222"}, Subjects: []string{"telegraf"}, QueueGroup: "telegraf_consumers", diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go index e600f482148e8..27b1408747012 100644 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ b/plugins/inputs/nats_consumer/nats_consumer_test.go @@ -28,7 +28,7 @@ func TestStartStop(t *testing.T) { require.NoError(t, container.Start(), "failed to start container") defer container.Terminate() - plugin := &natsConsumer{ + plugin := &NatsConsumer{ Servers: []string{fmt.Sprintf("nats://%s:%s", container.Address, container.Ports["4222"])}, Subjects: []string{"telegraf"}, QueueGroup: "telegraf_consumers", @@ -140,7 +140,7 @@ func TestSendReceive(t *testing.T) { } // Setup the plugin - plugin := &natsConsumer{ + plugin := &NatsConsumer{ Servers: []string{addr}, Subjects: subjects, QueueGroup: "telegraf_consumers", @@ -161,15 +161,15 @@ func TestSendReceive(t *testing.T) { defer plugin.Stop() // Send all messages to the topics (random order due to Golang map) - publisher := &sender{Addr: addr} - require.NoError(t, publisher.Connect()) - defer publisher.Disconnect() + 
publisher := &sender{addr: addr} + require.NoError(t, publisher.connect()) + defer publisher.disconnect() for topic, msgs := range tt.msgs { for _, msg := range msgs { - require.NoError(t, publisher.Send(topic, msg)) + require.NoError(t, publisher.send(topic, msg)) } } - publisher.Disconnect() + publisher.disconnect() // Wait for the metrics to be collected require.Eventually(t, func() bool { @@ -185,16 +185,12 @@ func TestSendReceive(t *testing.T) { } type sender struct { - Addr string - - Username string - Password string - + addr string conn *nats.Conn } -func (s *sender) Connect() error { - conn, err := nats.Connect(s.Addr) +func (s *sender) connect() error { + conn, err := nats.Connect(s.addr) if err != nil { return err } @@ -203,7 +199,7 @@ func (s *sender) Connect() error { return nil } -func (s *sender) Disconnect() { +func (s *sender) disconnect() { if s.conn != nil && !s.conn.IsClosed() { _ = s.conn.Flush() s.conn.Close() @@ -211,6 +207,6 @@ func (s *sender) Disconnect() { s.conn = nil } -func (s *sender) Send(topic, msg string) error { +func (s *sender) send(topic, msg string) error { return s.conn.Publish(topic, []byte(msg)) } diff --git a/plugins/inputs/neoom_beaam/neoom_beaam.go b/plugins/inputs/neoom_beaam/neoom_beaam.go index 44752c94969c8..c61a2a87f535d 100644 --- a/plugins/inputs/neoom_beaam/neoom_beaam.go +++ b/plugins/inputs/neoom_beaam/neoom_beaam.go @@ -68,12 +68,6 @@ func (n *NeoomBeaam) Start(telegraf.Accumulator) error { return n.updateConfiguration() } -func (n *NeoomBeaam) Stop() { - if n.client != nil { - n.client.CloseIdleConnections() - } -} - func (n *NeoomBeaam) Gather(acc telegraf.Accumulator) error { // Refresh the config if requested if n.RefreshConfig { @@ -97,6 +91,12 @@ func (n *NeoomBeaam) Gather(acc telegraf.Accumulator) error { return nil } +func (n *NeoomBeaam) Stop() { + if n.client != nil { + n.client.CloseIdleConnections() + } +} + func (n *NeoomBeaam) updateConfiguration() error { endpoint := n.Address + 
"/api/v1/site/configuration" request, err := http.NewRequest("GET", endpoint, nil) diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index cba51fef9304d..d5485959177c7 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -27,6 +27,12 @@ var sampleConfig string // Measurement is constant across all metrics. const Measurement = "neptune_apex" +type NeptuneApex struct { + Servers []string `toml:"servers"` + ResponseTimeout config.Duration `toml:"response_timeout"` + httpClient *http.Client +} + type xmlReply struct { SoftwareVersion string `xml:"software,attr"` HardwareVersion string `xml:"hardware,attr"` @@ -54,18 +60,10 @@ type outlet struct { Xstatus *string `xml:"xstatus"` } -// NeptuneApex implements telegraf.Input. -type NeptuneApex struct { - Servers []string - ResponseTimeout config.Duration - httpClient *http.Client -} - func (*NeptuneApex) SampleConfig() string { return sampleConfig } -// Gather implements telegraf.Input.Gather func (n *NeptuneApex) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, server := range n.Servers { diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index 2f52bee7d7f12..a64374cd22bde 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -69,7 +69,7 @@ func TestParseXML(t *testing.T) { }{ { name: "Good test", - xmlResponse: []byte(APEX2016), + xmlResponse: []byte(apex2016), wantMetrics: []telegraf.Metric{ testutil.MustMetric( Measurement, @@ -532,7 +532,7 @@ func fakeHTTPClient(h http.Handler) (*http.Client, func()) { } // Sample configuration from a 2016 version Neptune Apex. 
-const APEX2016 = ` +const apex2016 = ` apex AC5:12345 diff --git a/plugins/inputs/net/net.go b/plugins/inputs/net/net.go index 03d770c75c210..a65f4af20122c 100644 --- a/plugins/inputs/net/net.go +++ b/plugins/inputs/net/net.go @@ -21,20 +21,20 @@ import ( //go:embed sample.conf var sampleConfig string -type NetIOStats struct { - filter filter.Filter - ps system.PS +type Net struct { + Interfaces []string `toml:"interfaces"` + IgnoreProtocolStats bool `toml:"ignore_protocol_stats"` - skipChecks bool - IgnoreProtocolStats bool - Interfaces []string + filter filter.Filter + ps system.PS + skipChecks bool } -func (*NetIOStats) SampleConfig() string { +func (*Net) SampleConfig() string { return sampleConfig } -func (n *NetIOStats) Init() error { +func (n *Net) Init() error { if !n.IgnoreProtocolStats { config.PrintOptionValueDeprecationNotice("inputs.net", "ignore_protocol_stats", "false", telegraf.DeprecationInfo{ @@ -48,7 +48,7 @@ func (n *NetIOStats) Init() error { return nil } -func (n *NetIOStats) Gather(acc telegraf.Accumulator) error { +func (n *Net) Gather(acc telegraf.Accumulator) error { netio, err := n.ps.NetIO() if err != nil { return fmt.Errorf("error getting net io info: %w", err) @@ -153,6 +153,6 @@ func getInterfaceSpeed(ioName string) int64 { func init() { inputs.Add("net", func() telegraf.Input { - return &NetIOStats{ps: system.NewSystemPS()} + return &Net{ps: system.NewSystemPS()} }) } diff --git a/plugins/inputs/net/net_test.go b/plugins/inputs/net/net_test.go index 2f8ff0ae7dec5..537943d38c87d 100644 --- a/plugins/inputs/net/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -44,7 +44,7 @@ func TestNetIOStats(t *testing.T) { t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")) - plugin := &NetIOStats{ps: &mps, skipChecks: true} + plugin := &Net{ps: &mps, skipChecks: true} var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -111,7 +111,7 @@ func TestNetIOStatsSpeedUnsupported(t *testing.T) { t.Setenv("HOST_SYS", 
filepath.Join("testdata", "general", "sys")) - plugin := &NetIOStats{ps: &mps, skipChecks: true} + plugin := &Net{ps: &mps, skipChecks: true} var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -178,7 +178,7 @@ func TestNetIOStatsNoSpeedFile(t *testing.T) { t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")) - plugin := &NetIOStats{ps: &mps, skipChecks: true} + plugin := &Net{ps: &mps, skipChecks: true} var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index f54f9b4629eb7..e51ff4db40e07 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -20,33 +20,101 @@ import ( //go:embed sample.conf var sampleConfig string -type ResultType uint64 +type resultType uint64 const ( - Success ResultType = 0 - Timeout ResultType = 1 - ConnectionFailed ResultType = 2 - ReadFailed ResultType = 3 - StringMismatch ResultType = 4 + success resultType = 0 + timeout resultType = 1 + connectionFailed resultType = 2 + readFailed resultType = 3 + stringMismatch resultType = 4 ) -// NetResponse struct type NetResponse struct { - Address string - Timeout config.Duration - ReadTimeout config.Duration - Send string - Expect string - Protocol string + Address string `toml:"address"` + Timeout config.Duration `toml:"timeout"` + ReadTimeout config.Duration `toml:"read_timeout"` + Send string `toml:"send"` + Expect string `toml:"expect"` + Protocol string `toml:"protocol"` } func (*NetResponse) SampleConfig() string { return sampleConfig } -// TCPGather will execute if there are TCP tests defined in the configuration. 
-// It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, error) { +func (n *NetResponse) Init() error { + // Set default values + if n.Timeout == 0 { + n.Timeout = config.Duration(time.Second) + } + if n.ReadTimeout == 0 { + n.ReadTimeout = config.Duration(time.Second) + } + // Check send and expected string + if n.Protocol == "udp" && n.Send == "" { + return errors.New("send string cannot be empty") + } + if n.Protocol == "udp" && n.Expect == "" { + return errors.New("expected string cannot be empty") + } + // Prepare host and port + host, port, err := net.SplitHostPort(n.Address) + if err != nil { + return err + } + if host == "" { + n.Address = "localhost:" + port + } + if port == "" { + return errors.New("bad port in config option address") + } + + if err := choice.Check(n.Protocol, []string{"tcp", "udp"}); err != nil { + return fmt.Errorf("config option protocol: %w", err) + } + + return nil +} + +func (n *NetResponse) Gather(acc telegraf.Accumulator) error { + // Prepare host and port + host, port, err := net.SplitHostPort(n.Address) + if err != nil { + return err + } + + // Prepare data + tags := map[string]string{"server": host, "port": port} + var fields map[string]interface{} + var returnTags map[string]string + + // Gather data + switch n.Protocol { + case "tcp": + returnTags, fields, err = n.tcpGather() + if err != nil { + return err + } + tags["protocol"] = "tcp" + case "udp": + returnTags, fields, err = n.udpGather() + if err != nil { + return err + } + tags["protocol"] = "udp" + } + + // Merge the tags + for k, v := range returnTags { + tags[k] = v + } + // Add metrics + acc.AddFields("net_response", fields, tags) + return nil +} + +func (n *NetResponse) tcpGather() (map[string]string, map[string]interface{}, error) { // Prepare returns tags := make(map[string]string) fields := make(map[string]interface{}) @@ -60,9 +128,9 @@ func (n 
*NetResponse) TCPGather() (map[string]string, map[string]interface{}, er if err != nil { var e net.Error if errors.As(err, &e) && e.Timeout() { - setResult(Timeout, fields, tags, n.Expect) + setResult(timeout, fields, tags, n.Expect) } else { - setResult(ConnectionFailed, fields, tags, n.Expect) + setResult(connectionFailed, fields, tags, n.Expect) } return tags, fields, nil } @@ -91,27 +159,25 @@ func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, er responseTime = time.Since(start).Seconds() // Handle error if err != nil { - setResult(ReadFailed, fields, tags, n.Expect) + setResult(readFailed, fields, tags, n.Expect) } else { // Looking for string in answer regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) find := regEx.FindString(data) if find != "" { - setResult(Success, fields, tags, n.Expect) + setResult(success, fields, tags, n.Expect) } else { - setResult(StringMismatch, fields, tags, n.Expect) + setResult(stringMismatch, fields, tags, n.Expect) } } } else { - setResult(Success, fields, tags, n.Expect) + setResult(success, fields, tags, n.Expect) } fields["response_time"] = responseTime return tags, fields, nil } -// UDPGather will execute if there are UDP tests defined in the configuration. 
-// It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, error) { +func (n *NetResponse) udpGather() (map[string]string, map[string]interface{}, error) { // Prepare returns tags := make(map[string]string) fields := make(map[string]interface{}) @@ -121,14 +187,14 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er udpAddr, err := net.ResolveUDPAddr("udp", n.Address) // Handle error if err != nil { - setResult(ConnectionFailed, fields, tags, n.Expect) + setResult(connectionFailed, fields, tags, n.Expect) return tags, fields, nil } // Connecting conn, err := net.DialUDP("udp", nil, udpAddr) // Handle error if err != nil { - setResult(ConnectionFailed, fields, tags, n.Expect) + setResult(connectionFailed, fields, tags, n.Expect) return tags, fields, nil } defer conn.Close() @@ -149,7 +215,7 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er responseTime := time.Since(start).Seconds() // Handle error if err != nil { - setResult(ReadFailed, fields, tags, n.Expect) + setResult(readFailed, fields, tags, n.Expect) return tags, fields, nil } @@ -157,9 +223,9 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) find := regEx.FindString(string(buf)) if find != "" { - setResult(Success, fields, tags, n.Expect) + setResult(success, fields, tags, n.Expect) } else { - setResult(StringMismatch, fields, tags, n.Expect) + setResult(stringMismatch, fields, tags, n.Expect) } fields["response_time"] = responseTime @@ -167,94 +233,18 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er return tags, fields, nil } -// Init performs one time setup of the plugin and returns an error if the -// configuration is invalid. 
-func (n *NetResponse) Init() error { - // Set default values - if n.Timeout == 0 { - n.Timeout = config.Duration(time.Second) - } - if n.ReadTimeout == 0 { - n.ReadTimeout = config.Duration(time.Second) - } - // Check send and expected string - if n.Protocol == "udp" && n.Send == "" { - return errors.New("send string cannot be empty") - } - if n.Protocol == "udp" && n.Expect == "" { - return errors.New("expected string cannot be empty") - } - // Prepare host and port - host, port, err := net.SplitHostPort(n.Address) - if err != nil { - return err - } - if host == "" { - n.Address = "localhost:" + port - } - if port == "" { - return errors.New("bad port in config option address") - } - - if err := choice.Check(n.Protocol, []string{"tcp", "udp"}); err != nil { - return fmt.Errorf("config option protocol: %w", err) - } - - return nil -} - -// Gather is called by telegraf when the plugin is executed on its interval. -// It will call either UDPGather or TCPGather based on the configuration and -// also fill an Accumulator that is supplied. 
-func (n *NetResponse) Gather(acc telegraf.Accumulator) error { - // Prepare host and port - host, port, err := net.SplitHostPort(n.Address) - if err != nil { - return err - } - - // Prepare data - tags := map[string]string{"server": host, "port": port} - var fields map[string]interface{} - var returnTags map[string]string - - // Gather data - switch n.Protocol { - case "tcp": - returnTags, fields, err = n.TCPGather() - if err != nil { - return err - } - tags["protocol"] = "tcp" - case "udp": - returnTags, fields, err = n.UDPGather() - if err != nil { - return err - } - tags["protocol"] = "udp" - } - - // Merge the tags - for k, v := range returnTags { - tags[k] = v - } - // Add metrics - acc.AddFields("net_response", fields, tags) - return nil -} - -func setResult(result ResultType, fields map[string]interface{}, tags map[string]string, expect string) { +func setResult(result resultType, fields map[string]interface{}, tags map[string]string, expect string) { var tag string switch result { - case Success: + case success: tag = "success" - case Timeout: + case timeout: tag = "timeout" - case ConnectionFailed: + case connectionFailed: tag = "connection_failed" - case ReadFailed: + case readFailed: tag = "read_failed" - case StringMismatch: + case stringMismatch: tag = "string_mismatch" } @@ -266,7 +256,7 @@ func setResult(result ResultType, fields map[string]interface{}, tags map[string // deprecated in 1.4; use result tag if expect != "" { - fields["string_found"] = result == Success + fields["string_found"] = result == success } } diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index bfb6c2ce803c5..8621d2ca68c96 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -106,7 +106,7 @@ func TestTCPOK1(t *testing.T) { require.NoError(t, c.Init()) // Start TCP server wg.Add(1) - go TCPServer(t, &wg) + go tcpServer(t, &wg) wg.Wait() // Wait 
for the server to spin up wg.Add(1) // Connect @@ -151,7 +151,7 @@ func TestTCPOK2(t *testing.T) { require.NoError(t, c.Init()) // Start TCP server wg.Add(1) - go TCPServer(t, &wg) + go tcpServer(t, &wg) wg.Wait() wg.Add(1) @@ -233,7 +233,7 @@ func TestUDPOK1(t *testing.T) { require.NoError(t, c.Init()) // Start UDP server wg.Add(1) - go UDPServer(t, &wg) + go udpServer(t, &wg) wg.Wait() wg.Add(1) @@ -264,7 +264,7 @@ func TestUDPOK1(t *testing.T) { wg.Wait() } -func UDPServer(t *testing.T, wg *sync.WaitGroup) { +func udpServer(t *testing.T, wg *sync.WaitGroup) { defer wg.Done() udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:2004") if err != nil { @@ -297,7 +297,7 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup) { } } -func TCPServer(t *testing.T, wg *sync.WaitGroup) { +func tcpServer(t *testing.T, wg *sync.WaitGroup) { defer wg.Done() tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2004") if err != nil { diff --git a/plugins/inputs/netflow/netflow.go b/plugins/inputs/netflow/netflow.go index ad0a351e5de67..218d9b5296dba 100644 --- a/plugins/inputs/netflow/netflow.go +++ b/plugins/inputs/netflow/netflow.go @@ -19,11 +19,6 @@ import ( //go:embed sample.conf var sampleConfig string -type protocolDecoder interface { - Init() error - Decode(net.IP, []byte) ([]telegraf.Metric, error) -} - type NetFlow struct { ServiceAddress string `toml:"service_address"` ReadBufferSize config.Size `toml:"read_buffer_size"` @@ -37,6 +32,11 @@ type NetFlow struct { wg sync.WaitGroup } +type protocolDecoder interface { + init() error + decode(net.IP, []byte) ([]telegraf.Metric, error) +} + func (*NetFlow) SampleConfig() string { return sampleConfig } @@ -61,12 +61,12 @@ func (n *NetFlow) Init() error { n.Log.Warn("'private_enterprise_number_files' option will be ignored in 'netflow v9'") } n.decoder = &netflowDecoder{ - Log: n.Log, + log: n.Log, } case "", "ipfix": n.decoder = &netflowDecoder{ - PENFiles: n.PENFiles, - Log: n.Log, + penFiles: n.PENFiles, + log: n.Log, } case 
"netflow v5": if len(n.PENFiles) != 0 { @@ -74,12 +74,12 @@ func (n *NetFlow) Init() error { } n.decoder = &netflowv5Decoder{} case "sflow", "sflow v5": - n.decoder = &sflowv5Decoder{Log: n.Log} + n.decoder = &sflowv5Decoder{log: n.Log} default: return fmt.Errorf("invalid protocol %q, only supports 'sflow', 'netflow v5', 'netflow v9' and 'ipfix'", n.Protocol) } - return n.decoder.Init() + return n.decoder.init() } func (n *NetFlow) Start(acc telegraf.Accumulator) error { @@ -114,6 +114,10 @@ func (n *NetFlow) Start(acc telegraf.Accumulator) error { return nil } +func (n *NetFlow) Gather(_ telegraf.Accumulator) error { + return nil +} + func (n *NetFlow) Stop() { if n.conn != nil { _ = n.conn.Close() @@ -138,7 +142,7 @@ func (n *NetFlow) read(acc telegraf.Accumulator) { if n.Log.Level().Includes(telegraf.Trace) || n.DumpPackets { // for backward compatibility n.Log.Tracef("raw data: %s", hex.EncodeToString(buf[:count])) } - metrics, err := n.decoder.Decode(src.IP, buf[:count]) + metrics, err := n.decoder.decode(src.IP, buf[:count]) if err != nil { errWithData := fmt.Errorf("%w; raw data: %s", err, hex.EncodeToString(buf[:count])) acc.AddError(errWithData) @@ -150,10 +154,6 @@ func (n *NetFlow) read(acc telegraf.Accumulator) { } } -func (n *NetFlow) Gather(_ telegraf.Accumulator) error { - return nil -} - // Register the plugin func init() { inputs.Add("netflow", func() telegraf.Input { diff --git a/plugins/inputs/netflow/netflow_decoder.go b/plugins/inputs/netflow/netflow_decoder.go index 8285db76318f0..aa6f40fc8305a 100644 --- a/plugins/inputs/netflow/netflow_decoder.go +++ b/plugins/inputs/netflow/netflow_decoder.go @@ -530,8 +530,8 @@ var fieldMappingsIPFIX = map[uint16][]fieldMapping{ // Decoder structure type netflowDecoder struct { - PENFiles []string - Log telegraf.Logger + penFiles []string + log telegraf.Logger templates map[string]netflow.NetFlowTemplateSystem mappingsV9 map[uint16]fieldMapping @@ -542,7 +542,7 @@ type netflowDecoder struct { sync.Mutex } 
-func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { +func (d *netflowDecoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { var metrics []telegraf.Metric t := time.Now() @@ -563,7 +563,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric if err := netflow.DecodeMessageVersion(buf, templates, &msg9, &msg10); err != nil { if errors.Is(err, netflow.ErrorTemplateNotFound) { msg := "Skipping packet until the device resends the required template..." - d.Log.Warnf("%v. %s", err, msg) + d.log.Warnf("%v. %s", err, msg) return nil, nil } return nil, fmt.Errorf("decoding message failed: %w", err) @@ -587,7 +587,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.ScopesValues { decodedFields, err := d.decodeValueV9(value) if err != nil { - d.Log.Errorf("decoding option record %+v failed: %v", record, err) + d.log.Errorf("decoding option record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -597,7 +597,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.OptionsValues { decodedFields, err := d.decodeValueV9(value) if err != nil { - d.Log.Errorf("decoding option record %+v failed: %v", record, err) + d.log.Errorf("decoding option record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -616,7 +616,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.Values { decodedFields, err := d.decodeValueV9(value) if err != nil { - d.Log.Errorf("decoding record %+v failed: %v", record, err) + d.log.Errorf("decoding record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -643,7 +643,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.ScopesValues { 
decodedFields, err := d.decodeValueIPFIX(value) if err != nil { - d.Log.Errorf("decoding option record %+v failed: %v", record, err) + d.log.Errorf("decoding option record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -653,7 +653,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.OptionsValues { decodedFields, err := d.decodeValueIPFIX(value) if err != nil { - d.Log.Errorf("decoding option record %+v failed: %v", record, err) + d.log.Errorf("decoding option record %+v failed: %v", record, err) continue } for _, field := range decodedFields { @@ -673,7 +673,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric for _, value := range record.Values { decodedFields, err := d.decodeValueIPFIX(value) if err != nil { - d.Log.Errorf("decoding value %+v failed: %v", value, err) + d.log.Errorf("decoding value %+v failed: %v", value, err) continue } for _, field := range decodedFields { @@ -691,7 +691,7 @@ func (d *netflowDecoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric return metrics, nil } -func (d *netflowDecoder) Init() error { +func (d *netflowDecoder) init() error { if err := initL4ProtoMapping(); err != nil { return fmt.Errorf("initializing layer 4 protocol mapping failed: %w", err) } @@ -703,8 +703,8 @@ func (d *netflowDecoder) Init() error { d.mappingsV9 = make(map[uint16]fieldMapping) d.mappingsIPFIX = make(map[uint16]fieldMapping) d.mappingsPEN = make(map[string]fieldMapping) - for _, fn := range d.PENFiles { - d.Log.Debugf("Loading PEN mapping file %q...", fn) + for _, fn := range d.penFiles { + d.log.Debugf("Loading PEN mapping file %q...", fn) mappings, err := loadMapping(fn) if err != nil { return err @@ -719,7 +719,7 @@ func (d *netflowDecoder) Init() error { d.mappingsPEN[k] = v } } - d.Log.Infof("Loaded %d PEN mappings...", len(d.mappingsPEN)) + d.log.Infof("Loaded %d PEN mappings...", len(d.mappingsPEN)) d.logged 
= make(map[string]bool) @@ -783,7 +783,7 @@ func (d *netflowDecoder) decodeValueV9(field netflow.DataField) ([]telegraf.Fiel // Return the raw data if no mapping was found key := fmt.Sprintf("type_%d", elementID) if !d.logged[key] { - d.Log.Debugf("unknown Netflow v9 data field %v", field) + d.log.Debugf("unknown Netflow v9 data field %v", field) d.logged[key] = true } v, err := decodeHex(raw) @@ -817,7 +817,7 @@ func (d *netflowDecoder) decodeValueIPFIX(field netflow.DataField) ([]telegraf.F return []telegraf.Field{{Key: name, Value: v}}, nil } if !d.logged[key] { - d.Log.Debugf("unknown IPFIX PEN data field %v", field) + d.log.Debugf("unknown IPFIX PEN data field %v", field) d.logged[key] = true } name := fmt.Sprintf("type_%d_%s%d", field.Pen, prefix, elementID) @@ -866,7 +866,7 @@ func (d *netflowDecoder) decodeValueIPFIX(field netflow.DataField) ([]telegraf.F // Return the raw data if no mapping was found key := fmt.Sprintf("type_%d", elementID) if !d.logged[key] { - d.Log.Debugf("unknown IPFIX data field %v", field) + d.log.Debugf("unknown IPFIX data field %v", field) d.logged[key] = true } v, err := decodeHex(raw) diff --git a/plugins/inputs/netflow/netflow_v5.go b/plugins/inputs/netflow/netflow_v5.go index ee3e9d2c3b662..839a1d0943598 100644 --- a/plugins/inputs/netflow/netflow_v5.go +++ b/plugins/inputs/netflow/netflow_v5.go @@ -15,14 +15,14 @@ import ( // Decoder structure type netflowv5Decoder struct{} -func (d *netflowv5Decoder) Init() error { +func (d *netflowv5Decoder) init() error { if err := initL4ProtoMapping(); err != nil { return fmt.Errorf("initializing layer 4 protocol mapping failed: %w", err) } return nil } -func (d *netflowv5Decoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { +func (d *netflowv5Decoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { src := srcIP.String() // Decode the message diff --git a/plugins/inputs/netflow/sflow_v5.go b/plugins/inputs/netflow/sflow_v5.go index 
4d7a773b7d654..6e43680f3a597 100644 --- a/plugins/inputs/netflow/sflow_v5.go +++ b/plugins/inputs/netflow/sflow_v5.go @@ -19,13 +19,13 @@ import ( // Decoder structure type sflowv5Decoder struct { - Log telegraf.Logger + log telegraf.Logger warnedCounterRaw map[uint32]bool warnedFlowRaw map[int64]bool } -func (d *sflowv5Decoder) Init() error { +func (d *sflowv5Decoder) init() error { if err := initL4ProtoMapping(); err != nil { return fmt.Errorf("initializing layer 4 protocol mapping failed: %w", err) } @@ -35,7 +35,7 @@ func (d *sflowv5Decoder) Init() error { return nil } -func (d *sflowv5Decoder) Decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { +func (d *sflowv5Decoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { t := time.Now() src := srcIP.String() @@ -448,11 +448,11 @@ func (d *sflowv5Decoder) decodeRawHeaderSample(record *sflow.SampledHeader) (map if !d.warnedFlowRaw[ltype] { contents := hex.EncodeToString(pkt.LayerContents()) payload := hex.EncodeToString(pkt.LayerPayload()) - d.Log.Warnf("Unknown flow raw flow message %s (%d):", pkt.LayerType().String(), pkt.LayerType()) - d.Log.Warnf(" contents: %s", contents) - d.Log.Warnf(" payload: %s", payload) + d.log.Warnf("Unknown flow raw flow message %s (%d):", pkt.LayerType().String(), pkt.LayerType()) + d.log.Warnf(" contents: %s", contents) + d.log.Warnf(" payload: %s", payload) - d.Log.Warn("This message is only printed once.") + d.log.Warn("This message is only printed once.") } d.warnedFlowRaw[ltype] = true } @@ -524,8 +524,8 @@ func (d *sflowv5Decoder) decodeCounterRecords(records []sflow.CounterRecord) (ma default: if !d.warnedCounterRaw[r.Header.DataFormat] { data := hex.EncodeToString(record.Data) - d.Log.Warnf("Unknown counter raw flow message %d: %s", r.Header.DataFormat, data) - d.Log.Warn("This message is only printed once.") + d.log.Warnf("Unknown counter raw flow message %d: %s", r.Header.DataFormat, data) + d.log.Warn("This message is only printed once.") } 
d.warnedCounterRaw[r.Header.DataFormat] = true } diff --git a/plugins/inputs/netstat/netstat.go b/plugins/inputs/netstat/netstat.go index 18da7083cdaea..febdfa50d3bfb 100644 --- a/plugins/inputs/netstat/netstat.go +++ b/plugins/inputs/netstat/netstat.go @@ -14,16 +14,16 @@ import ( //go:embed sample.conf var sampleConfig string -type NetStats struct { - PS system.PS +type NetStat struct { + ps system.PS } -func (*NetStats) SampleConfig() string { +func (*NetStat) SampleConfig() string { return sampleConfig } -func (ns *NetStats) Gather(acc telegraf.Accumulator) error { - netconns, err := ns.PS.NetConnections() +func (ns *NetStat) Gather(acc telegraf.Accumulator) error { + netconns, err := ns.ps.NetConnections() if err != nil { return fmt.Errorf("error getting net connections info: %w", err) } @@ -66,6 +66,6 @@ func (ns *NetStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("netstat", func() telegraf.Input { - return &NetStats{PS: system.NewSystemPS()} + return &NetStat{ps: system.NewSystemPS()} }) } diff --git a/plugins/inputs/netstat/netstat_test.go b/plugins/inputs/netstat/netstat_test.go index 05cc4b227eb44..5ae6e48151382 100644 --- a/plugins/inputs/netstat/netstat_test.go +++ b/plugins/inputs/netstat/netstat_test.go @@ -32,7 +32,7 @@ func TestNetStats(t *testing.T) { }, nil) var acc testutil.Accumulator - require.NoError(t, (&NetStats{PS: &mps}).Gather(&acc)) + require.NoError(t, (&NetStat{ps: &mps}).Gather(&acc)) expected := []telegraf.Metric{ metric.New( diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go index 3f1f71835b659..e496fe476e7e2 100644 --- a/plugins/inputs/nfsclient/nfsclient.go +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -31,36 +31,193 @@ type NFSClient struct { mountstatsPath string } -func convertToUint64(line []string) ([]uint64, error) { - /* A "line" of input data (a pre-split array of strings) is - processed one field at a time. 
Each field is converted to - an uint64 value, and appended to an array of return values. - On an error, check for ErrRange, and returns an error - if found. This situation indicates a pretty major issue in - the /proc/self/mountstats file, and returning faulty data - is worse than no data. Other errors are ignored, and append - whatever we got in the first place (probably 0). - Yes, this is ugly. */ +func (*NFSClient) SampleConfig() string { + return sampleConfig +} - if len(line) < 2 { - return nil, nil +func (n *NFSClient) Init() error { + var nfs3Fields = []string{ + "NULL", + "GETATTR", + "SETATTR", + "LOOKUP", + "ACCESS", + "READLINK", + "READ", + "WRITE", + "CREATE", + "MKDIR", + "SYMLINK", + "MKNOD", + "REMOVE", + "RMDIR", + "RENAME", + "LINK", + "READDIR", + "READDIRPLUS", + "FSSTAT", + "FSINFO", + "PATHCONF", + "COMMIT", } - nline := make([]uint64, 0, len(line[1:])) - // Skip the first field; it's handled specially as the "first" variable - for _, l := range line[1:] { - val, err := strconv.ParseUint(l, 10, 64) - if err != nil { - var numError *strconv.NumError - if errors.As(err, &numError) { - if errors.Is(numError.Err, strconv.ErrRange) { - return nil, fmt.Errorf("errrange: line:[%v] raw:[%v] -> parsed:[%v]", line, l, val) - } + var nfs4Fields = []string{ + "NULL", + "READ", + "WRITE", + "COMMIT", + "OPEN", + "OPEN_CONFIRM", + "OPEN_NOATTR", + "OPEN_DOWNGRADE", + "CLOSE", + "SETATTR", + "FSINFO", + "RENEW", + "SETCLIENTID", + "SETCLIENTID_CONFIRM", + "LOCK", + "LOCKT", + "LOCKU", + "ACCESS", + "GETATTR", + "LOOKUP", + "LOOKUP_ROOT", + "REMOVE", + "RENAME", + "LINK", + "SYMLINK", + "CREATE", + "PATHCONF", + "STATFS", + "READLINK", + "READDIR", + "SERVER_CAPS", + "DELEGRETURN", + "GETACL", + "SETACL", + "FS_LOCATIONS", + "RELEASE_LOCKOWNER", + "SECINFO", + "FSID_PRESENT", + "EXCHANGE_ID", + "CREATE_SESSION", + "DESTROY_SESSION", + "SEQUENCE", + "GET_LEASE_TIME", + "RECLAIM_COMPLETE", + "LAYOUTGET", + "GETDEVICEINFO", + "LAYOUTCOMMIT", + "LAYOUTRETURN", + 
"SECINFO_NO_NAME", + "TEST_STATEID", + "FREE_STATEID", + "GETDEVICELIST", + "BIND_CONN_TO_SESSION", + "DESTROY_CLIENTID", + "SEEK", + "ALLOCATE", + "DEALLOCATE", + "LAYOUTSTATS", + "CLONE", + "COPY", + "OFFLOAD_CANCEL", + "LOOKUPP", + "LAYOUTERROR", + "COPY_NOTIFY", + "GETXATTR", + "SETXATTR", + "LISTXATTRS", + "REMOVEXATTR", + } + + nfs3Ops := make(map[string]bool) + nfs4Ops := make(map[string]bool) + + n.mountstatsPath = n.getMountStatsPath() + + if len(n.IncludeOperations) == 0 { + for _, Op := range nfs3Fields { + nfs3Ops[Op] = true + } + for _, Op := range nfs4Fields { + nfs4Ops[Op] = true + } + } else { + for _, Op := range n.IncludeOperations { + nfs3Ops[Op] = true + } + for _, Op := range n.IncludeOperations { + nfs4Ops[Op] = true + } + } + + if len(n.ExcludeOperations) > 0 { + for _, Op := range n.ExcludeOperations { + if nfs3Ops[Op] { + delete(nfs3Ops, Op) + } + if nfs4Ops[Op] { + delete(nfs4Ops, Op) } } - nline = append(nline, val) } - return nline, nil + + n.nfs3Ops = nfs3Ops + n.nfs4Ops = nfs4Ops + + if len(n.IncludeMounts) > 0 { + n.Log.Debugf("Including these mount patterns: %v", n.IncludeMounts) + } else { + n.Log.Debugf("Including all mounts.") + } + + if len(n.ExcludeMounts) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeMounts) + } else { + n.Log.Debugf("Not excluding any mounts.") + } + + if len(n.IncludeOperations) > 0 { + n.Log.Debugf("Including these operations: %v", n.IncludeOperations) + } else { + n.Log.Debugf("Including all operations.") + } + + if len(n.ExcludeOperations) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeOperations) + } else { + n.Log.Debugf("Not excluding any operations.") + } + + return nil +} + +func (n *NFSClient) Gather(acc telegraf.Accumulator) error { + if _, err := os.Stat(n.mountstatsPath); os.IsNotExist(err) { + return err + } + + // Attempt to read the file to see if we have permissions before opening + // which can lead to a panic + if _, err := 
os.ReadFile(n.mountstatsPath); err != nil { + return err + } + + file, err := os.Open(n.mountstatsPath) + if err != nil { + n.Log.Errorf("Failed opening the %q file: %v ", file.Name(), err) + return err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if err := n.processText(scanner, acc); err != nil { + return err + } + + return scanner.Err() } func (n *NFSClient) parseStat(mountpoint, export, version string, line []string, acc telegraf.Accumulator) error { @@ -291,193 +448,36 @@ func (n *NFSClient) getMountStatsPath() string { return path } -func (*NFSClient) SampleConfig() string { - return sampleConfig -} - -func (n *NFSClient) Gather(acc telegraf.Accumulator) error { - if _, err := os.Stat(n.mountstatsPath); os.IsNotExist(err) { - return err - } - - // Attempt to read the file to see if we have permissions before opening - // which can lead to a panic - if _, err := os.ReadFile(n.mountstatsPath); err != nil { - return err - } - - file, err := os.Open(n.mountstatsPath) - if err != nil { - n.Log.Errorf("Failed opening the %q file: %v ", file.Name(), err) - return err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - if err := n.processText(scanner, acc); err != nil { - return err - } - - return scanner.Err() -} - -func (n *NFSClient) Init() error { - var nfs3Fields = []string{ - "NULL", - "GETATTR", - "SETATTR", - "LOOKUP", - "ACCESS", - "READLINK", - "READ", - "WRITE", - "CREATE", - "MKDIR", - "SYMLINK", - "MKNOD", - "REMOVE", - "RMDIR", - "RENAME", - "LINK", - "READDIR", - "READDIRPLUS", - "FSSTAT", - "FSINFO", - "PATHCONF", - "COMMIT", - } - - var nfs4Fields = []string{ - "NULL", - "READ", - "WRITE", - "COMMIT", - "OPEN", - "OPEN_CONFIRM", - "OPEN_NOATTR", - "OPEN_DOWNGRADE", - "CLOSE", - "SETATTR", - "FSINFO", - "RENEW", - "SETCLIENTID", - "SETCLIENTID_CONFIRM", - "LOCK", - "LOCKT", - "LOCKU", - "ACCESS", - "GETATTR", - "LOOKUP", - "LOOKUP_ROOT", - "REMOVE", - "RENAME", - "LINK", - "SYMLINK", - "CREATE", - "PATHCONF", - 
"STATFS", - "READLINK", - "READDIR", - "SERVER_CAPS", - "DELEGRETURN", - "GETACL", - "SETACL", - "FS_LOCATIONS", - "RELEASE_LOCKOWNER", - "SECINFO", - "FSID_PRESENT", - "EXCHANGE_ID", - "CREATE_SESSION", - "DESTROY_SESSION", - "SEQUENCE", - "GET_LEASE_TIME", - "RECLAIM_COMPLETE", - "LAYOUTGET", - "GETDEVICEINFO", - "LAYOUTCOMMIT", - "LAYOUTRETURN", - "SECINFO_NO_NAME", - "TEST_STATEID", - "FREE_STATEID", - "GETDEVICELIST", - "BIND_CONN_TO_SESSION", - "DESTROY_CLIENTID", - "SEEK", - "ALLOCATE", - "DEALLOCATE", - "LAYOUTSTATS", - "CLONE", - "COPY", - "OFFLOAD_CANCEL", - "LOOKUPP", - "LAYOUTERROR", - "COPY_NOTIFY", - "GETXATTR", - "SETXATTR", - "LISTXATTRS", - "REMOVEXATTR", - } - - nfs3Ops := make(map[string]bool) - nfs4Ops := make(map[string]bool) - - n.mountstatsPath = n.getMountStatsPath() +func convertToUint64(line []string) ([]uint64, error) { + /* A "line" of input data (a pre-split array of strings) is + processed one field at a time. Each field is converted to + an uint64 value, and appended to an array of return values. + On an error, check for ErrRange, and returns an error + if found. This situation indicates a pretty major issue in + the /proc/self/mountstats file, and returning faulty data + is worse than no data. Other errors are ignored, and append + whatever we got in the first place (probably 0). + Yes, this is ugly. 
*/ - if len(n.IncludeOperations) == 0 { - for _, Op := range nfs3Fields { - nfs3Ops[Op] = true - } - for _, Op := range nfs4Fields { - nfs4Ops[Op] = true - } - } else { - for _, Op := range n.IncludeOperations { - nfs3Ops[Op] = true - } - for _, Op := range n.IncludeOperations { - nfs4Ops[Op] = true - } + if len(line) < 2 { + return nil, nil } - if len(n.ExcludeOperations) > 0 { - for _, Op := range n.ExcludeOperations { - if nfs3Ops[Op] { - delete(nfs3Ops, Op) - } - if nfs4Ops[Op] { - delete(nfs4Ops, Op) + nline := make([]uint64, 0, len(line[1:])) + // Skip the first field; it's handled specially as the "first" variable + for _, l := range line[1:] { + val, err := strconv.ParseUint(l, 10, 64) + if err != nil { + var numError *strconv.NumError + if errors.As(err, &numError) { + if errors.Is(numError.Err, strconv.ErrRange) { + return nil, fmt.Errorf("errrange: line:[%v] raw:[%v] -> parsed:[%v]", line, l, val) + } } } + nline = append(nline, val) } - - n.nfs3Ops = nfs3Ops - n.nfs4Ops = nfs4Ops - - if len(n.IncludeMounts) > 0 { - n.Log.Debugf("Including these mount patterns: %v", n.IncludeMounts) - } else { - n.Log.Debugf("Including all mounts.") - } - - if len(n.ExcludeMounts) > 0 { - n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeMounts) - } else { - n.Log.Debugf("Not excluding any mounts.") - } - - if len(n.IncludeOperations) > 0 { - n.Log.Debugf("Including these operations: %v", n.IncludeOperations) - } else { - n.Log.Debugf("Including all operations.") - } - - if len(n.ExcludeOperations) > 0 { - n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeOperations) - } else { - n.Log.Debugf("Not excluding any operations.") - } - - return nil + return nline, nil } func init() { diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index bf6909208dcfb..0d20653cff546 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -23,8 +23,8 @@ import ( var sampleConfig string type Nginx struct { - Urls []string - 
ResponseTimeout config.Duration + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig // HTTP client diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index ed9b450acd277..2f32161368404 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -276,11 +276,11 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum if err := dec.Decode(status); err != nil { return errors.New("error while decoding JSON response") } - status.Gather(tags, acc) + status.gather(tags, acc) return nil } -func (s *status) Gather(tags map[string]string, acc telegraf.Accumulator) { +func (s *status) gather(tags map[string]string, acc telegraf.Accumulator) { s.gatherProcessesMetrics(tags, acc) s.gatherConnectionsMetrics(tags, acc) s.gatherSslMetrics(tags, acc) diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index acf3be64a57f4..02dbba3516690 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -18,15 +18,6 @@ import ( //go:embed sample.conf var sampleConfig string -type NginxPlusAPI struct { - Urls []string `toml:"urls"` - APIVersion int64 `toml:"api_version"` - ResponseTimeout config.Duration `toml:"response_timeout"` - tls.ClientConfig - - client *http.Client -} - const ( // Default settings defaultAPIVersion = 3 @@ -49,6 +40,15 @@ const ( streamUpstreamsPath = "stream/upstreams" ) +type NginxPlusAPI struct { + Urls []string `toml:"urls"` + APIVersion int64 `toml:"api_version"` + ResponseTimeout config.Duration `toml:"response_timeout"` + tls.ClientConfig + + client *http.Client +} + func (*NginxPlusAPI) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/nginx_sts/nginx_sts.go b/plugins/inputs/nginx_sts/nginx_sts.go index 75dddeb9b9481..18c7bba4eedea 100644 --- 
a/plugins/inputs/nginx_sts/nginx_sts.go +++ b/plugins/inputs/nginx_sts/nginx_sts.go @@ -106,7 +106,7 @@ func (n *NginxSTS) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } } -type NginxSTSResponse struct { +type nginxSTSResponse struct { Connections struct { Active uint64 `json:"active"` Reading uint64 `json:"reading"` @@ -117,12 +117,12 @@ type NginxSTSResponse struct { Requests uint64 `json:"requests"` } `json:"connections"` Hostname string `json:"hostName"` - StreamFilterZones map[string]map[string]Server `json:"streamFilterZones"` - StreamServerZones map[string]Server `json:"streamServerZones"` - StreamUpstreamZones map[string][]Upstream `json:"streamUpstreamZones"` + StreamFilterZones map[string]map[string]server `json:"streamFilterZones"` + StreamServerZones map[string]server `json:"streamServerZones"` + StreamUpstreamZones map[string][]upstream `json:"streamUpstreamZones"` } -type Server struct { +type server struct { ConnectCounter uint64 `json:"connectCounter"` InBytes uint64 `json:"inBytes"` OutBytes uint64 `json:"outBytes"` @@ -137,7 +137,7 @@ type Server struct { } `json:"responses"` } -type Upstream struct { +type upstream struct { Server string `json:"server"` ConnectCounter uint64 `json:"connectCounter"` InBytes uint64 `json:"inBytes"` @@ -166,7 +166,7 @@ type Upstream struct { func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { dec := json.NewDecoder(r) - status := &NginxSTSResponse{} + status := &nginxSTSResponse{} if err := dec.Decode(status); err != nil { return errors.New("error while decoding JSON response") } diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 46a142878a779..c1d02e5cae9f9 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -34,31 +34,15 @@ type NginxUpstreamCheck struct { client *http.Client } 
-func NewNginxUpstreamCheck() *NginxUpstreamCheck { - return &NginxUpstreamCheck{ - URL: "http://127.0.0.1/status?format=json", - Method: "GET", - Headers: make(map[string]string), - HostHeader: "", - Timeout: config.Duration(time.Second * 5), - } -} - -func init() { - inputs.Add("nginx_upstream_check", func() telegraf.Input { - return NewNginxUpstreamCheck() - }) -} - -type NginxUpstreamCheckData struct { +type nginxUpstreamCheckData struct { Servers struct { Total uint64 `json:"total"` Generation uint64 `json:"generation"` - Server []NginxUpstreamCheckServer `json:"server"` + Server []nginxUpstreamCheckServer `json:"server"` } `json:"servers"` } -type NginxUpstreamCheckServer struct { +type nginxUpstreamCheckServer struct { Index uint64 `json:"index"` Upstream string `json:"upstream"` Name string `json:"name"` @@ -69,6 +53,33 @@ type NginxUpstreamCheckServer struct { Port uint16 `json:"port"` } +func (*NginxUpstreamCheck) SampleConfig() string { + return sampleConfig +} + +func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error { + if check.client == nil { + client, err := check.createHTTPClient() + + if err != nil { + return err + } + check.client = client + } + + statusURL, err := url.Parse(check.URL) + if err != nil { + return err + } + + err = check.gatherStatusData(statusURL.String(), accumulator) + if err != nil { + return err + } + + return nil +} + // createHTTPClient create a clients to access API func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { tlsConfig, err := check.ClientConfig.TLSConfig() @@ -130,35 +141,8 @@ func (check *NginxUpstreamCheck) gatherJSONData(address string, value interface{ return nil } -func (*NginxUpstreamCheck) SampleConfig() string { - return sampleConfig -} - -func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error { - if check.client == nil { - client, err := check.createHTTPClient() - - if err != nil { - return err - } - check.client = client - } - - 
statusURL, err := url.Parse(check.URL) - if err != nil { - return err - } - - err = check.gatherStatusData(statusURL.String(), accumulator) - if err != nil { - return err - } - - return nil -} - func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator telegraf.Accumulator) error { - checkData := &NginxUpstreamCheckData{} + checkData := &nginxUpstreamCheckData{} err := check.gatherJSONData(address, checkData) if err != nil { @@ -197,3 +181,19 @@ func (check *NginxUpstreamCheck) getStatusCode(status string) uint8 { return 0 } } + +func newNginxUpstreamCheck() *NginxUpstreamCheck { + return &NginxUpstreamCheck{ + URL: "http://127.0.0.1/status?format=json", + Method: "GET", + Headers: make(map[string]string), + HostHeader: "", + Timeout: config.Duration(time.Second * 5), + } +} + +func init() { + inputs.Add("nginx_upstream_check", func() telegraf.Input { + return newNginxUpstreamCheck() + }) +} diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go index 4cd10020e3ea5..7eb9b065a892d 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -58,7 +58,7 @@ func TestNginxUpstreamCheckData(test *testing.T) { })) defer testServer.Close() - check := NewNginxUpstreamCheck() + check := newNginxUpstreamCheck() check.URL = testServer.URL + "/status" var accumulator testutil.Accumulator @@ -139,7 +139,7 @@ func TestNginxUpstreamCheckRequest(test *testing.T) { })) defer testServer.Close() - check := NewNginxUpstreamCheck() + check := newNginxUpstreamCheck() check.URL = testServer.URL + "/status" check.Headers["X-test"] = "test-value" check.HostHeader = "status.local" diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go index 2dea49d3623bd..68b06edeb1f62 100644 --- a/plugins/inputs/nginx_vts/nginx_vts.go +++ 
b/plugins/inputs/nginx_vts/nginx_vts.go @@ -106,7 +106,7 @@ func (n *NginxVTS) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } } -type NginxVTSResponse struct { +type nginxVTSResponse struct { Connections struct { Active uint64 `json:"active"` Reading uint64 `json:"reading"` @@ -116,13 +116,13 @@ type NginxVTSResponse struct { Handled uint64 `json:"handled"` Requests uint64 `json:"requests"` } `json:"connections"` - ServerZones map[string]Server `json:"serverZones"` - FilterZones map[string]map[string]Server `json:"filterZones"` - UpstreamZones map[string][]Upstream `json:"upstreamZones"` - CacheZones map[string]Cache `json:"cacheZones"` + ServerZones map[string]server `json:"serverZones"` + FilterZones map[string]map[string]server `json:"filterZones"` + UpstreamZones map[string][]upstream `json:"upstreamZones"` + CacheZones map[string]cache `json:"cacheZones"` } -type Server struct { +type server struct { RequestCounter uint64 `json:"requestCounter"` InBytes uint64 `json:"inBytes"` OutBytes uint64 `json:"outBytes"` @@ -144,7 +144,7 @@ type Server struct { } `json:"responses"` } -type Upstream struct { +type upstream struct { Server string `json:"server"` RequestCounter uint64 `json:"requestCounter"` InBytes uint64 `json:"inBytes"` @@ -165,7 +165,7 @@ type Upstream struct { Down bool `json:"down"` } -type Cache struct { +type cache struct { MaxSize uint64 `json:"maxSize"` UsedSize uint64 `json:"usedSize"` InBytes uint64 `json:"inBytes"` @@ -184,7 +184,7 @@ type Cache struct { func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { dec := json.NewDecoder(r) - status := &NginxVTSResponse{} + status := &nginxVTSResponse{} if err := dec.Decode(status); err != nil { return errors.New("error while decoding JSON response") } diff --git a/plugins/inputs/nomad/nomad.go b/plugins/inputs/nomad/nomad.go index 85a9d9636e98b..b297feb235d10 100644 --- a/plugins/inputs/nomad/nomad.go +++ b/plugins/inputs/nomad/nomad.go @@ -18,27 
+18,16 @@ import ( //go:embed sample.conf var sampleConfig string -// Nomad configuration object -type Nomad struct { - URL string `toml:"url"` +const timeLayout = "2006-01-02 15:04:05 -0700 MST" +type Nomad struct { + URL string `toml:"url"` ResponseTimeout config.Duration `toml:"response_timeout"` - tls.ClientConfig roundTripper http.RoundTripper } -const timeLayout = "2006-01-02 15:04:05 -0700 MST" - -func init() { - inputs.Add("nomad", func() telegraf.Input { - return &Nomad{ - ResponseTimeout: config.Duration(5 * time.Second), - } - }) -} - func (*Nomad) SampleConfig() string { return sampleConfig } @@ -161,3 +150,11 @@ func buildNomadMetrics(acc telegraf.Accumulator, summaryMetrics *metricsSummary) return nil } + +func init() { + inputs.Add("nomad", func() telegraf.Input { + return &Nomad{ + ResponseTimeout: config.Duration(5 * time.Second), + } + }) +} diff --git a/plugins/inputs/nomad/nomad_metrics.go b/plugins/inputs/nomad/nomad_metrics.go index 8c2d4a1e9eefa..d4eece9bda1e1 100644 --- a/plugins/inputs/nomad/nomad_metrics.go +++ b/plugins/inputs/nomad/nomad_metrics.go @@ -37,6 +37,7 @@ type sampledValue struct { DisplayLabels map[string]string `json:"Labels"` } +// AggregateSample needs to be exported, because JSON decode cannot set embedded pointer to unexported struct type AggregateSample struct { Count int `json:"count"` Rate float64 `json:"rate"` diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index deac3855aa0e7..4704aacd97116 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -21,61 +21,27 @@ import ( //go:embed sample.conf var sampleConfig string -type runner func(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) +var ( + defaultBinary = "/usr/sbin/nsd-control" + defaultTimeout = config.Duration(time.Second) +) -// NSD is used to store configuration values type NSD struct { - Binary string - Timeout config.Duration - UseSudo bool - Server string - 
ConfigFile string + Binary string `toml:"binary"` + Timeout config.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` + Server string `toml:"server"` + ConfigFile string `toml:"config_file"` run runner } -var defaultBinary = "/usr/sbin/nsd-control" -var defaultTimeout = config.Duration(time.Second) - -// Shell out to nsd_stat and return the output -func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server, configFile string) (*bytes.Buffer, error) { - cmdArgs := []string{"stats_noreset"} - - if server != "" { - host, port, err := net.SplitHostPort(server) - if err == nil { - server = host + "@" + port - } - - cmdArgs = append([]string{"-s", server}, cmdArgs...) - } - - if configFile != "" { - cmdArgs = append([]string{"-c", configFile}, cmdArgs...) - } - - cmd := exec.Command(cmdName, cmdArgs...) - - if useSudo { - cmdArgs = append([]string{cmdName}, cmdArgs...) - cmd = exec.Command("sudo", cmdArgs...) - } - - var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Duration(timeout)) - if err != nil { - return &out, fmt.Errorf("error running nsd-control: %w (%s %v)", err, cmdName, cmdArgs) - } - - return &out, nil -} +type runner func(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) func (*NSD) SampleConfig() string { return sampleConfig } -// Gather collects stats from nsd-control and adds them to the Accumulator func (s *NSD) Gather(acc telegraf.Accumulator) error { out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ConfigFile) if err != nil { @@ -133,6 +99,40 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { return nil } +// Shell out to nsd_stat and return the output +func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server, configFile string) (*bytes.Buffer, error) { + cmdArgs := []string{"stats_noreset"} + + if server != "" { + host, port, err := net.SplitHostPort(server) + if err == nil { + server = host + 
"@" + port + } + + cmdArgs = append([]string{"-s", server}, cmdArgs...) + } + + if configFile != "" { + cmdArgs = append([]string{"-c", configFile}, cmdArgs...) + } + + cmd := exec.Command(cmdName, cmdArgs...) + + if useSudo { + cmdArgs = append([]string{cmdName}, cmdArgs...) + cmd = exec.Command("sudo", cmdArgs...) + } + + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return &out, fmt.Errorf("error running nsd-control: %w (%s %v)", err, cmdName, cmdArgs) + } + + return &out, nil +} + func init() { inputs.Add("nsd", func() telegraf.Input { return &NSD{ diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index dad4c0e925ff0..8aba9e8852946 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func NSDControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { +func nsdControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBufferString(output), nil } @@ -19,7 +19,7 @@ func NSDControl(output string) func(string, config.Duration, bool, string, strin func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &NSD{ - run: NSDControl(fullOutput), + run: nsdControl(fullOutput), } err := v.Gather(acc) diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index b124d28ee3707..b9c3b4b88053a 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -42,25 +42,15 @@ import ( //go:embed sample.conf var sampleConfig string -// Might add Lookupd endpoints for cluster discovery -type NSQ struct { - Endpoints []string - tls.ClientConfig - httpClient *http.Client -} - const ( requestPattern = `%s/stats?format=json` ) -func init() { - inputs.Add("nsq", func() 
telegraf.Input { - return New() - }) -} +type NSQ struct { + Endpoints []string `toml:"endpoints"` -func New() *NSQ { - return &NSQ{} + tls.ClientConfig + httpClient *http.Client } func (*NSQ) SampleConfig() string { @@ -305,3 +295,13 @@ type clientStats struct { TLSNegotiatedProtocol string `json:"tls_negotiated_protocol"` TLSNegotiatedProtocolIsMutual bool `json:"tls_negotiated_protocol_is_mutual"` } + +func newNSQ() *NSQ { + return &NSQ{} +} + +func init() { + inputs.Add("nsq", func() telegraf.Input { + return newNSQ() + }) +} diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index b2887c2b62edb..713585208fec0 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -23,7 +23,7 @@ func TestNSQStatsV1(t *testing.T) { })) defer ts.Close() - n := New() + n := newNSQ() n.Endpoints = []string{ts.URL} var acc testutil.Accumulator @@ -283,7 +283,7 @@ func TestNSQStatsPreV1(t *testing.T) { })) defer ts.Close() - n := New() + n := newNSQ() n.Endpoints = []string{ts.URL} var acc testutil.Accumulator diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 6117558874559..69f2a0aea73a1 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -20,40 +20,39 @@ const ( defaultMaxUndeliveredMessages = 1000 ) -type empty struct{} -type semaphore chan empty - -type logger struct { - log telegraf.Logger -} - -func (l *logger) Output(_ int, s string) error { - l.log.Debug(s) - return nil -} - -// NSQConsumer represents the configuration of the plugin type NSQConsumer struct { - Server string `toml:"server" deprecated:"1.5.0;1.35.0;use 'nsqd' instead"` - Nsqd []string `toml:"nsqd"` - Nsqlookupd []string `toml:"nsqlookupd"` - Topic string `toml:"topic"` - Channel string `toml:"channel"` - MaxInFlight int `toml:"max_in_flight"` - - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Server string `toml:"server" 
deprecated:"1.5.0;1.35.0;use 'nsqd' instead"` + Nsqd []string `toml:"nsqd"` + Nsqlookupd []string `toml:"nsqlookupd"` + Topic string `toml:"topic"` + Channel string `toml:"channel"` + MaxInFlight int `toml:"max_in_flight"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Log telegraf.Logger `toml:"-"` parser telegraf.Parser consumer *nsq.Consumer - Log telegraf.Logger - mu sync.Mutex messages map[telegraf.TrackingID]*nsq.Message wg sync.WaitGroup cancel context.CancelFunc } +type ( + empty struct{} + semaphore chan empty +) + +type logger struct { + log telegraf.Logger +} + +func (l *logger) Output(_ int, s string) error { + l.log.Debug(s) + return nil +} + func (*NSQConsumer) SampleConfig() string { return sampleConfig } @@ -77,7 +76,6 @@ func (n *NSQConsumer) SetParser(parser telegraf.Parser) { n.parser = parser } -// Start pulls data from nsq func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { acc := ac.WithTracking(n.MaxUndeliveredMessages) sem := make(semaphore, n.MaxUndeliveredMessages) @@ -140,6 +138,17 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { return nil } +func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (n *NSQConsumer) Stop() { + n.cancel() + n.wg.Wait() + n.consumer.Stop() + <-n.consumer.StopChan +} + func (n *NSQConsumer) onDelivery(ctx context.Context, acc telegraf.TrackingAccumulator, sem semaphore) { for { select { @@ -165,19 +174,6 @@ func (n *NSQConsumer) onDelivery(ctx context.Context, acc telegraf.TrackingAccum } } -// Stop processing messages -func (n *NSQConsumer) Stop() { - n.cancel() - n.wg.Wait() - n.consumer.Stop() - <-n.consumer.StopChan -} - -// Gather is a noop -func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { - return nil -} - func (n *NSQConsumer) connect() error { if n.consumer == nil { config := nsq.NewConfig() diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 7f517f7b55506..b6b149c867918 100644 --- 
a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -20,20 +20,18 @@ var ( colonByte = []byte(":") ) -// default file paths const ( - NetNetstat = "/net/netstat" - NetSnmp = "/net/snmp" - NetSnmp6 = "/net/snmp6" - NetProc = "/proc" -) - -// env variable names -const ( - EnvNetstat = "PROC_NET_NETSTAT" - EnvSnmp = "PROC_NET_SNMP" - EnvSnmp6 = "PROC_NET_SNMP6" - EnvRoot = "PROC_ROOT" + // default file paths + netNetstat = "/net/netstat" + netSnmp = "/net/snmp" + netSnmp6 = "/net/snmp6" + netProc = "/proc" + + // env variable names + envNetstat = "PROC_NET_NETSTAT" + envSnmp = "PROC_NET_SNMP" + envSnmp6 = "PROC_NET_SNMP6" + envRoot = "PROC_ROOT" ) type Nstat struct { @@ -104,13 +102,13 @@ func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) { // if it is empty then try read from env variables func (ns *Nstat) loadPaths() { if ns.ProcNetNetstat == "" { - ns.ProcNetNetstat = proc(EnvNetstat, NetNetstat) + ns.ProcNetNetstat = proc(envNetstat, netNetstat) } if ns.ProcNetSNMP == "" { - ns.ProcNetSNMP = proc(EnvSnmp, NetSnmp) + ns.ProcNetSNMP = proc(envSnmp, netSnmp) } if ns.ProcNetSNMP6 == "" { - ns.ProcNetSNMP6 = proc(EnvSnmp6, NetSnmp6) + ns.ProcNetSNMP6 = proc(envSnmp6, netSnmp6) } } @@ -188,9 +186,9 @@ func proc(env, path string) string { return p } // try to read root path, or use default root path - root := os.Getenv(EnvRoot) + root := os.Getenv(envRoot) if root == "" { - root = NetProc + root = netProc } return root + path } diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index fc5254f18bf70..e24b1f1a94f3d 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -30,16 +30,25 @@ var reBrackets = regexp.MustCompile(`\s+\([\S]*`) type elementType int64 const ( - None elementType = iota - Tag - FieldFloat - FieldDuration - FieldIntDecimal - FieldIntOctal - FieldIntRatio8 - FieldIntBits + none elementType = iota + tag + fieldFloat + fieldDuration + fieldIntDecimal + fieldIntOctal + 
fieldIntRatio8 + fieldIntBits ) +type NTPQ struct { + DNSLookup bool `toml:"dns_lookup" deprecated:"1.24.0;1.35.0;add '-n' to 'options' instead to skip DNS lookup"` + Options string `toml:"options"` + Servers []string `toml:"servers"` + ReachFormat string `toml:"reach_format"` + + runQ func(string) ([]byte, error) +} + type column struct { name string etype elementType @@ -55,21 +64,12 @@ var tagHeaders = map[string]string{ // Mapping of fields var fieldElements = map[string]elementType{ - "delay": FieldFloat, - "jitter": FieldFloat, - "offset": FieldFloat, - "reach": FieldIntDecimal, - "poll": FieldDuration, - "when": FieldDuration, -} - -type NTPQ struct { - DNSLookup bool `toml:"dns_lookup" deprecated:"1.24.0;1.35.0;add '-n' to 'options' instead to skip DNS lookup"` - Options string `toml:"options"` - Servers []string `toml:"servers"` - ReachFormat string `toml:"reach_format"` - - runQ func(string) ([]byte, error) + "delay": fieldFloat, + "jitter": fieldFloat, + "offset": fieldFloat, + "reach": fieldIntDecimal, + "poll": fieldDuration, + "when": fieldDuration, } func (*NTPQ) SampleConfig() string { @@ -117,19 +117,19 @@ func (n *NTPQ) Init() error { n.ReachFormat = "octal" // Interpret the field as decimal integer returning // the raw (octal) representation - fieldElements["reach"] = FieldIntDecimal + fieldElements["reach"] = fieldIntDecimal case "decimal": // Interpret the field as octal integer returning // decimal number representation - fieldElements["reach"] = FieldIntOctal + fieldElements["reach"] = fieldIntOctal case "count": // Interpret the field as bits set returning // the number of bits set - fieldElements["reach"] = FieldIntBits + fieldElements["reach"] = fieldIntBits case "ratio": // Interpret the field as ratio between the number of // bits set and the maximum available bits set (8). 
- fieldElements["reach"] = FieldIntRatio8 + fieldElements["reach"] = fieldIntRatio8 default: return fmt.Errorf("unknown 'reach_format' %q", n.ReachFormat) } @@ -176,7 +176,7 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { if name, isTag := tagHeaders[el]; isTag { columns = append(columns, column{ name: name, - etype: Tag, + etype: tag, }) continue } @@ -191,7 +191,7 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { } // Skip the column if not found - columns = append(columns, column{etype: None}) + columns = append(columns, column{etype: none}) } break } @@ -221,11 +221,11 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { col := columns[i] switch col.etype { - case None: + case none: continue - case Tag: + case tag: tags[col.name] = raw - case FieldFloat: + case fieldFloat: value, err := strconv.ParseFloat(raw, 64) if err != nil { msg := fmt.Sprintf("%sparsing %q (%v) as float failed", msgPrefix, col.name, raw) @@ -233,7 +233,7 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { continue } fields[col.name] = value - case FieldDuration: + case fieldDuration: // Ignore fields only containing a minus if raw == "-" { continue @@ -257,28 +257,28 @@ func (n *NTPQ) gatherServer(acc telegraf.Accumulator, server string) { continue } fields[col.name] = value * factor - case FieldIntDecimal: + case fieldIntDecimal: value, err := strconv.ParseInt(raw, 10, 64) if err != nil { acc.AddError(fmt.Errorf("parsing %q (%v) as int failed: %w", col.name, raw, err)) continue } fields[col.name] = value - case FieldIntOctal: + case fieldIntOctal: value, err := strconv.ParseInt(raw, 8, 64) if err != nil { acc.AddError(fmt.Errorf("parsing %q (%v) as int failed: %w", col.name, raw, err)) continue } fields[col.name] = value - case FieldIntBits: + case fieldIntBits: value, err := strconv.ParseUint(raw, 8, 64) if err != nil { acc.AddError(fmt.Errorf("parsing %q (%v) as int failed: %w", col.name, raw, 
err)) continue } fields[col.name] = bits.OnesCount64(value) - case FieldIntRatio8: + case fieldIntRatio8: value, err := strconv.ParseUint(raw, 8, 64) if err != nil { acc.AddError(fmt.Errorf("parsing %q (%v) as int failed: %w", col.name, raw, err)) diff --git a/plugins/inputs/nvidia_smi/common/setters.go b/plugins/inputs/nvidia_smi/common/setters.go index 4c5e0772578c4..c74cbfe246fbb 100644 --- a/plugins/inputs/nvidia_smi/common/setters.go +++ b/plugins/inputs/nvidia_smi/common/setters.go @@ -5,12 +5,14 @@ import ( "strings" ) +// SetTagIfUsed sets those tags whose value is different from empty string. func SetTagIfUsed(m map[string]string, k, v string) { if v != "" { m[k] = v } } +// SetIfUsed sets those fields whose value is different from empty string. func SetIfUsed(t string, m map[string]interface{}, k, v string) { vals := strings.Fields(v) if len(vals) < 1 { diff --git a/plugins/inputs/nvidia_smi/schema_v11/parser.go b/plugins/inputs/nvidia_smi/schema_v11/parser.go index ae45117d6008d..05864790c4808 100644 --- a/plugins/inputs/nvidia_smi/schema_v11/parser.go +++ b/plugins/inputs/nvidia_smi/schema_v11/parser.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi/common" ) +// Parse parses the XML-encoded data from nvidia-smi and adds measurements. func Parse(acc telegraf.Accumulator, buf []byte) error { var s smi if err := xml.Unmarshal(buf, &s); err != nil { diff --git a/plugins/inputs/nvidia_smi/schema_v12/parser.go b/plugins/inputs/nvidia_smi/schema_v12/parser.go index 4bc88bd017e70..bafd884017596 100644 --- a/plugins/inputs/nvidia_smi/schema_v12/parser.go +++ b/plugins/inputs/nvidia_smi/schema_v12/parser.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi/common" ) +// Parse parses the XML-encoded data from nvidia-smi and adds measurements. 
func Parse(acc telegraf.Accumulator, buf []byte) error { var s smi if err := xml.Unmarshal(buf, &s); err != nil { From e0170fa26709d4ff6823ce0824118e58fd07f43c Mon Sep 17 00:00:00 2001 From: Mingyang Zheng Date: Mon, 25 Nov 2024 02:26:09 -0800 Subject: [PATCH 104/170] fix(logging): Add Close() func for redirectLogger (#16219) --- logger/handler.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/logger/handler.go b/logger/handler.go index 7f23b32b0a6b6..6bc066658d183 100644 --- a/logger/handler.go +++ b/logger/handler.go @@ -125,3 +125,13 @@ func (l *redirectLogger) Print(level telegraf.LogLevel, ts time.Time, prefix str msg := append([]interface{}{ts.In(time.UTC).Format(time.RFC3339), " ", level.Indicator(), " ", prefix + attrMsg}, args...) fmt.Fprintln(l.writer, msg...) } + +func (l *redirectLogger) Close() error { + if l.writer == os.Stderr { + return nil + } + if closer, ok := l.writer.(io.Closer); ok { + return closer.Close() + } + return nil +} From be927ae47eaedda6cb408c77e0bbdaf63fb8de2d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:46:39 +0100 Subject: [PATCH 105/170] chore(deps): Bump github.com/vishvananda/netns from 0.0.4 to 0.0.5 (#16199) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 9c0ae6d3c9fb5..2dede13e17fd7 100644 --- a/go.mod +++ b/go.mod @@ -199,7 +199,7 @@ require ( github.com/urfave/cli/v2 v2.27.2 github.com/vapourismo/knx-go v0.0.0-20240217175130-922a0d50c241 github.com/vishvananda/netlink v1.3.0 - github.com/vishvananda/netns v0.0.4 + github.com/vishvananda/netns v0.0.5 github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.45.1 github.com/wavefronthq/wavefront-sdk-go v0.15.0 diff --git a/go.sum b/go.sum index af13981e637d2..d464963735c23 100644 --- a/go.sum +++ b/go.sum @@ -2361,8 +2361,9 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp 
github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmware/govmomi v0.45.1 h1:pmMmSUNIw/kePaCRFaUOpDh7IxDfhDi9M4Qh+DRlBV4= From 937a0e54233e1957dc38c2cee912108562385062 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:07:29 -0600 Subject: [PATCH 106/170] chore(deps): Bump golang.org/x/net from 0.30.0 to 0.31.0 (#16236) --- go.mod | 8 ++++---- go.sum | 8 ++++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 2dede13e17fd7..9ab2e41a1342c 100644 --- a/go.mod +++ b/go.mod @@ -213,13 +213,13 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 go.starlark.net v0.0.0-20240925182052-1207426daebd go.step.sm/crypto v0.54.0 - golang.org/x/crypto v0.28.0 + golang.org/x/crypto v0.29.0 golang.org/x/mod v0.21.0 - golang.org/x/net v0.30.0 + golang.org/x/net v0.31.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.9.0 - golang.org/x/sys v0.26.0 - golang.org/x/term v0.25.0 + golang.org/x/sys v0.27.0 + golang.org/x/term v0.26.0 golang.org/x/text v0.20.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 gonum.org/v1/gonum v0.15.1 diff --git a/go.sum b/go.sum index 
d464963735c23..7c38aef6a73bd 100644 --- a/go.sum +++ b/go.sum @@ -2536,6 +2536,8 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2691,6 +2693,8 @@ golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2879,6 +2883,8 @@ golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2899,6 +2905,8 @@ golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 97744ba306e77958142d65c45f3404dc6a884d90 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:07:53 -0600 Subject: [PATCH 107/170] chore(deps): Bump cloud.google.com/go/bigquery from 1.63.1 to 1.64.0 (#16232) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9ab2e41a1342c..17edb3b2f0cfa 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/influxdata/telegraf go 1.23.0 require ( - cloud.google.com/go/bigquery v1.63.1 + cloud.google.com/go/bigquery v1.64.0 cloud.google.com/go/monitoring v1.21.1 
cloud.google.com/go/pubsub v1.45.1 cloud.google.com/go/storage v1.43.0 diff --git a/go.sum b/go.sum index 7c38aef6a73bd..a2fa1ad35aff4 100644 --- a/go.sum +++ b/go.sum @@ -131,8 +131,8 @@ cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/Zur cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/bigquery v1.63.1 h1:/6syiWrSpardKNxdvldS5CUTRJX1iIkSPXCjLjiGL+g= -cloud.google.com/go/bigquery v1.63.1/go.mod h1:ufaITfroCk17WTqBhMpi8CRjsfHjMX07pDrQaRKKX2o= +cloud.google.com/go/bigquery v1.64.0 h1:vSSZisNyhr2ioJE1OuYBQrnrpB7pIhRQm4jfjc7E/js= +cloud.google.com/go/bigquery v1.64.0/go.mod h1:gy8Ooz6HF7QmA+TRtX8tZmXBKH5mCFBwUApGAb3zI7Y= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= From 38864e6fdd8335852f54443c30fc80fbd808d5af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:08:23 -0600 Subject: [PATCH 108/170] chore(deps): Bump google.golang.org/grpc from 1.67.1 to 1.68.0 (#16233) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 17edb3b2f0cfa..b6a48e188cce0 100644 --- a/go.mod +++ b/go.mod @@ -225,7 +225,7 @@ require ( gonum.org/v1/gonum v0.15.1 google.golang.org/api v0.203.0 google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 - google.golang.org/grpc v1.67.1 + google.golang.org/grpc v1.68.0 google.golang.org/protobuf v1.35.1 gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/olivere/elastic.v5 v5.0.86 diff --git a/go.sum 
b/go.sum index a2fa1ad35aff4..02f1b2d1cbc8d 100644 --- a/go.sum +++ b/go.sum @@ -3293,8 +3293,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From 3434e4f7464b081ea2f3ec05e7758706afd04315 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:09:00 -0600 Subject: [PATCH 109/170] chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.29.3 to 1.32.6 (#16234) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index b6a48e188cce0..54ba5fdb75b72 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.36.2 github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1 - github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.6 github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 
github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4 github.com/aws/smithy-go v1.22.1 @@ -278,7 +278,7 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/awnumar/memcall v0.3.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect diff --git a/go.sum b/go.sum index 02f1b2d1cbc8d..87fe3b6f0fced 100644 --- a/go.sum +++ b/go.sum @@ -862,8 +862,8 @@ github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJ github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.32.5 h1:U8vdWJuY7ruAkzaOdD7guwJjD06YSKmnKCJs7s3IkIo= github.com/aws/aws-sdk-go-v2 v1.32.5/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= github.com/aws/aws-sdk-go-v2/config v1.27.39 h1:FCylu78eTGzW1ynHcongXK9YHtoXD5AiiUqq3YfJYjU= @@ -921,8 +921,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4/go.mod h1:4G 
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 h1:ktR7RUdUQ8m9rkgCPRsS7iTJgFp9MXEX0nltrT8bxY4= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3/go.mod h1:hufTMUGSlcBLGgs6leSPbDfY1sM3mrO2qjtVkPMTDhE= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.6 h1:yN7WEx9ksiP5+9zdKtoQYrUT51HvYw+EA1TXsElvMyk= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.6/go.mod h1:j8MNat6qtGw5OoEACRbWtT8r5my4nRWfM/6Uk+NsuC4= github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 h1:hT8ZAZRIfqBqHbzKTII+CIiY8G2oC9OpLedkZ51DWl8= github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= From a0e9bc42cfcc03b3c27dfc78c266f068e5eed9f3 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 3 Dec 2024 18:10:03 +0100 Subject: [PATCH 110/170] chore(actions): Only check PR title for semantic commit message (#16253) --- .github/workflows/semantic.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml index 7dc9f439d0460..ea869d99b4c54 100644 --- a/.github/workflows/semantic.yml +++ b/.github/workflows/semantic.yml @@ -11,5 +11,5 @@ jobs: semantic: uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main with: - CHECK_PR_TITLE_OR_ONE_COMMIT: true + COMMITS_HISTORY: 0 From fa2cd63bc97054ab6a267355b109cd87128350c5 Mon Sep 17 00:00:00 2001 From: Baker X <40594937+Benxiaohai001@users.noreply.github.com> Date: Wed, 4 Dec 2024 01:10:36 +0800 Subject: [PATCH 111/170] 
docs(serializers.json): Fix typo (#16245) --- plugins/serializers/json/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/serializers/json/README.md b/plugins/serializers/json/README.md index 78b9ee63a332c..edf50ee8bb45f 100644 --- a/plugins/serializers/json/README.md +++ b/plugins/serializers/json/README.md @@ -102,7 +102,7 @@ reference the documentation for the specific plugin. ## Transformations Transformations using the [JSONata standard](https://jsonata.org/) can be specified with -the `json_tansformation` parameter. The input to the transformation is the serialized +the `json_transformation` parameter. The input to the transformation is the serialized metric in the standard-form above. **Note**: There is a difference in batch and non-batch serialization mode! From 84d1db92295adaf68f35e44863e89bac1ab75c63 Mon Sep 17 00:00:00 2001 From: Jose Luis Gonzalez Calvo <90149790+joseluisgonzalezca@users.noreply.github.com> Date: Tue, 3 Dec 2024 18:12:38 +0100 Subject: [PATCH 112/170] fix(inputs.netflow): Decode flags in TCP and IP headers correctly (#16248) Co-authored-by: jlgonzalez --- plugins/inputs/netflow/sflow_v5.go | 33 ++++++++++++++++++------------ 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/netflow/sflow_v5.go b/plugins/inputs/netflow/sflow_v5.go index 6e43680f3a597..7ac616bf54ebd 100644 --- a/plugins/inputs/netflow/sflow_v5.go +++ b/plugins/inputs/netflow/sflow_v5.go @@ -391,12 +391,13 @@ func (d *sflowv5Decoder) decodeRawHeaderSample(record *sflow.SampledHeader) (map fields["dst"] = l.DstIP.String() flags := []byte("........") - switch { - case l.Flags&layers.IPv4EvilBit > 0: + if l.Flags&layers.IPv4EvilBit > 0 { flags[7] = byte('E') - case l.Flags&layers.IPv4DontFragment > 0: + } + if l.Flags&layers.IPv4DontFragment > 0 { flags[6] = byte('D') - case l.Flags&layers.IPv4MoreFragments > 0: + } + if l.Flags&layers.IPv4MoreFragments > 0 { flags[5] = byte('M') } fields["fragment_flags"] = 
string(flags) @@ -418,22 +419,28 @@ func (d *sflowv5Decoder) decodeRawHeaderSample(record *sflow.SampledHeader) (map fields["tcp_window_size"] = l.Window fields["tcp_urgent_ptr"] = l.Urgent flags := []byte("........") - switch { - case l.FIN: + if l.FIN { flags[7] = byte('F') - case l.SYN: + } + if l.SYN { flags[6] = byte('S') - case l.RST: + } + if l.RST { flags[5] = byte('R') - case l.PSH: + } + if l.PSH { flags[4] = byte('P') - case l.ACK: + } + if l.ACK { flags[3] = byte('A') - case l.URG: + } + if l.URG { flags[2] = byte('U') - case l.ECE: + } + if l.ECE { flags[1] = byte('E') - case l.CWR: + } + if l.CWR { flags[0] = byte('C') } fields["tcp_flags"] = string(flags) From f2133ebf3fda7add501f75c038a2131c342404bd Mon Sep 17 00:00:00 2001 From: Long FlyBridge Date: Wed, 4 Dec 2024 01:13:13 +0800 Subject: [PATCH 113/170] chore: Fix function names in comments (#16231) Signed-off-by: longxiangqiao --- agent/agent.go | 2 +- config/types.go | 2 +- plugins/inputs/ipmi_sensor/ipmi_sensor_test.go | 4 ++-- plugins/inputs/sensors/sensors_test.go | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 6ebc4fcdc6a1a..7f00fc6ca9ff5 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -811,7 +811,7 @@ func (a *Agent) startOutputs( return src, unit, nil } -// connectOutputs connects to all outputs. +// connectOutput connects to all outputs. func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) error { log.Printf("D! 
[agent] Attempting connection to [%s]", output.LogName()) if err := output.Connect(); err != nil { diff --git a/config/types.go b/config/types.go index 4a8a2822a50e5..f6a7c00ea5130 100644 --- a/config/types.go +++ b/config/types.go @@ -19,7 +19,7 @@ type Duration time.Duration // Size is an int64 type Size int64 -// UnmarshalTOML parses the duration from the TOML config file +// UnmarshalText parses the duration from the Text config file func (d *Duration) UnmarshalText(b []byte) error { // convert to string durStr := string(b) diff --git a/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go index 25b4576b64bbd..74dab5eb18b8f 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go @@ -214,7 +214,7 @@ func TestGather(t *testing.T) { } } -// fackeExecCommand is a helper function that mock +// fakeExecCommand is a helper function that mock // the exec.Command call (and call the test binary) func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} @@ -536,7 +536,7 @@ func TestGatherV2(t *testing.T) { } } -// fackeExecCommandV2 is a helper function that mock +// fakeExecCommandV2 is a helper function that mock // the exec.Command call (and call the test binary) func fakeExecCommandV2(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcessV2", "--", command} diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 47b8e8cbbce70..2a94cea3aa38a 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -290,7 +290,7 @@ func TestGatherNotRemoveNumbers(t *testing.T) { } } -// fackeExecCommand is a helper function that mock +// fakeExecCommand is a helper function that mock // the exec.Command call (and call the test binary) func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := 
[]string{"-test.run=TestHelperProcess", "--", command} From a125d3eb48c79b736673adceb8965d383fe0915f Mon Sep 17 00:00:00 2001 From: stackcoder Date: Tue, 3 Dec 2024 18:14:02 +0100 Subject: [PATCH 114/170] feat(inputs.smart): Add Power on Hours and Cycle Count (#16230) --- plugins/inputs/smart/smart.go | 2 ++ plugins/inputs/smart/smart_test.go | 14 ++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index dec5a45bc6288..e704971d43b76 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -88,6 +88,8 @@ var ( "1": "read_error_rate", "5": "reallocated_sectors_count", "7": "seek_error_rate", + "9": "power_on_hours", + "12": "power_cycle_count", "10": "spin_retry_count", "184": "end_to_end_error", "187": "uncorrectable_errors", diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index 449460838bb56..884435d3ccd90 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -53,7 +53,7 @@ func TestGatherAttributes(t *testing.T) { err := s.Gather(&acc) require.NoError(t, err) - require.Equal(t, 68, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 70, acc.NFields(), "Wrong number of fields gathered") for _, test := range testsAda0Attributes { acc.AssertContainsTaggedFields(t, "smart_attribute", test.fields, test.tags) @@ -172,7 +172,7 @@ func TestGatherNoAttributes(t *testing.T) { err := s.Gather(&acc) require.NoError(t, err) - require.Equal(t, 11, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 13, acc.NFields(), "Wrong number of fields gathered") acc.AssertDoesNotContainMeasurement(t, "smart_attribute") for _, test := range testsAda0Device { @@ -213,7 +213,7 @@ func TestGatherSATAInfo(t *testing.T) { wg.Add(1) sampleSmart.gatherDisk(acc, "", wg) - require.Equal(t, 106, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 108, 
acc.NFields(), "Wrong number of fields gathered") require.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -229,7 +229,7 @@ func TestGatherSATAInfo65(t *testing.T) { wg.Add(1) sampleSmart.gatherDisk(acc, "", wg) - require.Equal(t, 96, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 98, acc.NFields(), "Wrong number of fields gathered") require.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -294,7 +294,7 @@ func TestGatherSSD(t *testing.T) { wg.Add(1) sampleSmart.gatherDisk(acc, "", wg) - require.Equal(t, 110, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 112, acc.NFields(), "Wrong number of fields gathered") require.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -310,7 +310,7 @@ func TestGatherSSDRaid(t *testing.T) { wg.Add(1) sampleSmart.gatherDisk(acc, "", wg) - require.Equal(t, 77, acc.NFields(), "Wrong number of fields gathered") + require.Equal(t, 79, acc.NFields(), "Wrong number of fields gathered") require.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -1492,6 +1492,8 @@ var ( "wear_leveling_count": int64(185), "pending_sector_count": int64(0), "reallocated_sectors_count": int64(0), + "power_cycle_count": int64(14879), + "power_on_hours": int64(2988), }, map[string]string{ "device": "ada0", From 9e4174dbd8a605342ce592b7cb3c6f990f8dcea9 Mon Sep 17 00:00:00 2001 From: Phil Bracikowski <13472206+philjb@users.noreply.github.com> Date: Tue, 3 Dec 2024 09:18:46 -0800 Subject: [PATCH 115/170] fix(inputs.procstat): Handle running processes correctly across multiple filters (#16257) --- plugins/inputs/procstat/procstat.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 6a50cb317ad2b..4e3e4df6d38c0 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ 
-324,7 +324,7 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error { func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { now := time.Now() - + running := make(map[PID]bool) for _, f := range p.Filter { groups, err := f.ApplyFilter() if err != nil { @@ -347,7 +347,6 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { } var count int - running := make(map[PID]bool) for _, g := range groups { count += len(g.processes) for _, gp := range g.processes { @@ -397,13 +396,6 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { } } - // Cleanup processes that are not running anymore - for pid := range p.processes { - if !running[pid] { - delete(p.processes, pid) - } - } - // Add lookup statistics-metric acc.AddFields( "procstat_lookup", @@ -419,6 +411,13 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { now, ) } + + // Cleanup processes that are not running anymore across all filters/groups + for pid := range p.processes { + if !running[pid] { + delete(p.processes, pid) + } + } return nil } From a096658af723474a23b4335d4a0b0186a3b606c0 Mon Sep 17 00:00:00 2001 From: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> Date: Wed, 4 Dec 2024 03:59:21 -0600 Subject: [PATCH 116/170] docs: Fix PostgreSQL example DSN (#16229) --- docs/SQL_DRIVERS_INPUT.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md index 54b2519f8f207..458c6a0931a28 100644 --- a/docs/SQL_DRIVERS_INPUT.md +++ b/docs/SQL_DRIVERS_INPUT.md @@ -3,20 +3,20 @@ This is a list of available drivers for the SQL input plugin. The data-source-name (DSN) is driver specific and might change between versions. Please check the driver documentation for available options and the format. 
-| database | driver | aliases | example DSN | comment | -| -------------------- | --------------------------------------------------------- | --------------- | -------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -| ClickHouse | [clickhouse](https://github.com/ClickHouse/clickhouse-go) | | `tcp://host:port[?param1=value&...¶mN=value]"` | see [clickhouse-go docs](https://github.com/ClickHouse/clickhouse-go#dsn) for more information | -| CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres or pgx | see _postgres_ driver | uses PostgresQL driver | -| FlightSQL | [flightsql](https://github.com/apache/arrow/tree/main/go/arrow/flight/flightsql/driver) | | `flightsql://[username[:password]@]host:port?timeout=10s[&token=TOKEN][¶m1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/apache/arrow/blob/main/go/arrow/flight/flightsql/driver/README.md) for more information | -| IBM Netezza | [nzgo](https://github.com/IBM/nzgo) | |`host=your_nz_host port=5480 user=your_nz_user password=your_nz_password dbname=your_nz_db_name sslmode=disable`| see [driver docs](https://pkg.go.dev/github.com/IBM/nzgo/v12) for more | -| MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | -| Microsoft SQL Server | [sqlserver](https://github.com/microsoft/go-mssqldb) | mssql | `sqlserver://username:password@host/instance?param1=value¶m2=value` | uses newer _sqlserver_ driver | -| MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information | -| Oracle | [oracle](https://github.com/sijms/go-ora) | oracle | `oracle://username:password@host:port/service?param1=value¶m2=value` | see [driver 
docs](https://github.com/sijms/go-ora/blob/master/README.md) for more information | -| PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information | -| SAP HANA | [go-hdb](https://github.com/SAP/go-hdb) | hana | `hdb://user:password@host:port` | see [driver docs](https://github.com/SAP/go-hdb) for more information | -| SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docs](https://pkg.go.dev/modernc.org/sqlite) for more information | -| TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | +| database | driver | aliases | example DSN | comment | +| -------------------- | --------------------------------------------------------- | --------------- |------------------------------------------------------------------------------------------------------------------| --------------------------------------------------------------------------------------------------------------------- | +| ClickHouse | [clickhouse](https://github.com/ClickHouse/clickhouse-go) | | `tcp://host:port[?param1=value&...¶mN=value]"` | see [clickhouse-go docs](https://github.com/ClickHouse/clickhouse-go#dsn) for more information | +| CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres or pgx | see _postgres_ driver | uses PostgresQL driver | +| FlightSQL | [flightsql](https://github.com/apache/arrow/tree/main/go/arrow/flight/flightsql/driver) | | `flightsql://[username[:password]@]host:port?timeout=10s[&token=TOKEN][¶m1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/apache/arrow/blob/main/go/arrow/flight/flightsql/driver/README.md) for more information | +| IBM Netezza | [nzgo](https://github.com/IBM/nzgo) | | `host=your_nz_host port=5480 user=your_nz_user password=your_nz_password 
dbname=your_nz_db_name sslmode=disable` | see [driver docs](https://pkg.go.dev/github.com/IBM/nzgo/v12) for more | +| MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | +| Microsoft SQL Server | [sqlserver](https://github.com/microsoft/go-mssqldb) | mssql | `sqlserver://username:password@host/instance?param1=value¶m2=value` | uses newer _sqlserver_ driver | +| MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information | +| Oracle | [oracle](https://github.com/sijms/go-ora) | oracle | `oracle://username:password@host:port/service?param1=value¶m2=value` | see [driver docs](https://github.com/sijms/go-ora/blob/master/README.md) for more information | +| PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `postgresql://[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information | +| SAP HANA | [go-hdb](https://github.com/SAP/go-hdb) | hana | `hdb://user:password@host:port` | see [driver docs](https://github.com/SAP/go-hdb) for more information | +| SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docs](https://pkg.go.dev/modernc.org/sqlite) for more information | +| TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | ## Comments From 8ec078e4a1d547932916b2b3975fd5733b991af0 Mon Sep 17 00:00:00 2001 From: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> Date: Wed, 4 Dec 2024 13:30:15 -0600 Subject: [PATCH 117/170] feat(inputs.vsphere): Add cpu temperature field (#16109) --- plugins/inputs/vsphere/README.md | 2 ++ plugins/inputs/vsphere/finder.go | 1 + 2 files changed, 3 insertions(+) diff --git 
a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 97184ec0ae09a..3324c088c9264 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -551,6 +551,8 @@ override the default query interval in the vSphere plugin. * Power: energy, usage * Datastore stats: * Disk: Capacity, provisioned, used +* Numeric Sensor stats: + * CPU: temperature For a detailed list of commonly available metrics, please refer to [METRICS.md](METRICS.md) diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 8d58653c1c1d7..529a6ca92d039 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -263,6 +263,7 @@ func init() { "Datastore": {"parent", "info", "customValue"}, "ClusterComputeResource": {"parent", "customValue"}, "Datacenter": {"parent", "customValue"}, + "HostNumericSensorInfo": {"parent", "temperature", "baseUnits"}, } containers = map[string]interface{}{ From 83da4fa7050b8c117bde1f7379664443ecba2cbb Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 4 Dec 2024 21:55:11 +0100 Subject: [PATCH 118/170] feat(outputs): Implement partial write errors (#16146) --- internal/errors.go | 20 +++ models/buffer.go | 89 ++++++++--- models/buffer_disk.go | 129 +++++++++++----- models/buffer_disk_test.go | 10 +- models/buffer_mem.go | 68 ++++----- models/buffer_mem_test.go | 5 +- models/buffer_suite_test.go | 275 +++++++++++++++++++++------------- models/running_output.go | 51 ++++--- models/running_output_test.go | 1 + 9 files changed, 426 insertions(+), 222 deletions(-) diff --git a/internal/errors.go b/internal/errors.go index a1f58c3eb2510..d1e098ea441ce 100644 --- a/internal/errors.go +++ b/internal/errors.go @@ -37,3 +37,23 @@ func (e *FatalError) Error() string { func (e *FatalError) Unwrap() error { return e.Err } + +// PartialWriteError indicate that only a subset of the metrics were written +// successfully (i.e. accepted). 
The rejected metrics should be removed from +// the buffer without being successfully written. Please note: the metrics +// are specified as indices into the batch to be able to reference tracking +// metrics correctly. +type PartialWriteError struct { + Err error + MetricsAccept []int + MetricsReject []int + MetricsRejectErrors []error +} + +func (e *PartialWriteError) Error() string { + return e.Err.Error() +} + +func (e *PartialWriteError) Unwrap() error { + return e.Err +} diff --git a/models/buffer.go b/models/buffer.go index f20e6ee2f9c08..167f56639818f 100644 --- a/models/buffer.go +++ b/models/buffer.go @@ -10,12 +10,57 @@ import ( ) var ( - AgentMetricsWritten = selfstat.Register("agent", "metrics_written", make(map[string]string)) - AgentMetricsDropped = selfstat.Register("agent", "metrics_dropped", make(map[string]string)) + AgentMetricsWritten = selfstat.Register("agent", "metrics_written", make(map[string]string)) + AgentMetricsRejected = selfstat.Register("agent", "metrics_rejected", make(map[string]string)) + AgentMetricsDropped = selfstat.Register("agent", "metrics_dropped", make(map[string]string)) registerGob = sync.OnceFunc(func() { metric.Init() }) ) +type Transaction struct { + // Batch of metrics to write + Batch []telegraf.Metric + + // Accept denotes the indices of metrics that were successfully written + Accept []int + // Reject denotes the indices of metrics that were not written but should + // not be requeued + Reject []int + + // Marks this transaction as valid + valid bool + + // Internal state that can be used by the buffer implementation + state interface{} +} + +func (tx *Transaction) AcceptAll() { + tx.Accept = make([]int, len(tx.Batch)) + for i := range tx.Batch { + tx.Accept[i] = i + } +} + +func (tx *Transaction) KeepAll() {} + +func (tx *Transaction) InferKeep() []int { + used := make([]bool, len(tx.Batch)) + for _, idx := range tx.Accept { + used[idx] = true + } + for _, idx := range tx.Reject { + used[idx] = true + } + + keep 
:= make([]int, 0, len(tx.Batch)) + for i := range tx.Batch { + if !used[i] { + keep = append(keep, i) + } + } + return keep +} + type Buffer interface { // Len returns the number of metrics currently in the buffer. Len() int @@ -23,19 +68,15 @@ type Buffer interface { // Add adds metrics to the buffer and returns number of dropped metrics. Add(metrics ...telegraf.Metric) int - // Batch returns a slice containing up to batchSize of the oldest metrics not - // yet dropped. Metrics are ordered from oldest to newest in the batch. The - // batch must not be modified by the client. - Batch(batchSize int) []telegraf.Metric - - // Accept marks the batch, acquired from Batch(), as successfully written. - Accept(metrics []telegraf.Metric) + // Batch starts a transaction by returning a slice of metrics up to the + // given batch-size starting from the oldest metric in the buffer. Metrics + // are ordered from oldest to newest and must not be modified by the plugin. + BeginTransaction(batchSize int) *Transaction - // Reject returns the batch, acquired from Batch(), to the buffer and marks it - // as unsent. - Reject([]telegraf.Metric) + // Flush ends a metric and persists the buffer state + EndTransaction(*Transaction) - // Stats returns the buffer statistics such as rejected, dropped and accepred metrics + // Stats returns the buffer statistics such as rejected, dropped and accepted metrics Stats() BufferStats // Close finalizes the buffer and closes all open resources @@ -45,11 +86,12 @@ type Buffer interface { // BufferStats holds common metrics used for buffer implementations. // Implementations of Buffer should embed this struct in them. 
type BufferStats struct { - MetricsAdded selfstat.Stat - MetricsWritten selfstat.Stat - MetricsDropped selfstat.Stat - BufferSize selfstat.Stat - BufferLimit selfstat.Stat + MetricsAdded selfstat.Stat + MetricsWritten selfstat.Stat + MetricsRejected selfstat.Stat + MetricsDropped selfstat.Stat + BufferSize selfstat.Stat + BufferLimit selfstat.Stat } // NewBuffer returns a new empty Buffer with the given capacity. @@ -84,6 +126,11 @@ func NewBufferStats(name, alias string, capacity int) BufferStats { "metrics_written", tags, ), + MetricsRejected: selfstat.Register( + "write", + "metrics_rejected", + tags, + ), MetricsDropped: selfstat.Register( "write", "metrics_dropped", @@ -115,6 +162,12 @@ func (b *BufferStats) metricWritten(m telegraf.Metric) { m.Accept() } +func (b *BufferStats) metricRejected(m telegraf.Metric) { + AgentMetricsRejected.Incr(1) + b.MetricsRejected.Incr(1) + m.Reject() +} + func (b *BufferStats) metricDropped(m telegraf.Metric) { AgentMetricsDropped.Incr(1) b.MetricsDropped.Incr(1) diff --git a/models/buffer_disk.go b/models/buffer_disk.go index 57836dbab9070..799ac24758cb1 100644 --- a/models/buffer_disk.go +++ b/models/buffer_disk.go @@ -5,6 +5,8 @@ import ( "fmt" "log" "path/filepath" + "slices" + "sort" "sync" "github.com/tidwall/wal" @@ -31,6 +33,11 @@ type DiskBuffer struct { // we have to do our best and track that the walfile "should" be empty, so that next // write, we can remove the invalid entry (also skipping this entry if it is being read). isEmpty bool + + // The mask contains offsets of metric already removed during a previous + // transaction. Metrics at those offsets should not be contained in new + // batches. 
+ mask []int } func NewDiskBuffer(name, id, path string, stats BufferStats) (*DiskBuffer, error) { @@ -67,7 +74,11 @@ func (b *DiskBuffer) length() int { if b.isEmpty { return 0 } - // Special case for when the read index is zero, it must be empty (otherwise it would be >= 1) + + return b.entries() - len(b.mask) +} + +func (b *DiskBuffer) entries() int { if b.readIndex() == 0 { return 0 } @@ -121,28 +132,33 @@ func (b *DiskBuffer) addSingleMetric(m telegraf.Metric) bool { return false } -func (b *DiskBuffer) Batch(batchSize int) []telegraf.Metric { +func (b *DiskBuffer) BeginTransaction(batchSize int) *Transaction { b.Lock() defer b.Unlock() if b.length() == 0 { - // no metrics in the wal file, so return an empty array - return make([]telegraf.Metric, 0) + return &Transaction{} } b.batchFirst = b.readIndex() - var metrics []telegraf.Metric - b.batchSize = 0 + + metrics := make([]telegraf.Metric, 0, batchSize) + offsets := make([]int, 0, batchSize) readIndex := b.batchFirst endIndex := b.writeIndex() + offset := 0 for batchSize > 0 && readIndex < endIndex { data, err := b.file.Read(readIndex) if err != nil { panic(err) } readIndex++ + offset++ - m, err := metric.FromBytes(data) + if slices.Contains(b.mask, offset) { + // Metric is masked by a previous write and is scheduled for removal + continue + } // Validate that a tracking metric is from this instance of telegraf and skip ones from older instances. // A tracking metric can be skipped here because metric.Accept() is only called once data is successfully @@ -152,11 +168,12 @@ func (b *DiskBuffer) Batch(batchSize int) []telegraf.Metric { // - ErrSkipTracking: means that the tracking information was unable to be found for a tracking ID. // - Outside of range: means that the metric was guaranteed to be left over from the previous instance // as it was here when we opened the wal file in this instance. 
- if errors.Is(err, metric.ErrSkipTracking) { - // could not look up tracking information for metric, skip - continue - } + m, err := metric.FromBytes(data) if err != nil { + if errors.Is(err, metric.ErrSkipTracking) { + // could not look up tracking information for metric, skip + continue + } // non-recoverable error in deserialization, abort log.Printf("E! raw metric data: %v", data) panic(err) @@ -167,33 +184,82 @@ func (b *DiskBuffer) Batch(batchSize int) []telegraf.Metric { } metrics = append(metrics, m) + offsets = append(offsets, offset) b.batchSize++ batchSize-- } - return metrics + return &Transaction{Batch: metrics, valid: true, state: offsets} } -func (b *DiskBuffer) Accept(batch []telegraf.Metric) { +func (b *DiskBuffer) EndTransaction(tx *Transaction) { + if len(tx.Batch) == 0 { + return + } + + // Ignore invalid transactions and make sure they can only be finished once + if !tx.valid { + return + } + tx.valid = false + + // Get the metric offsets from the transaction + offsets := tx.state.([]int) + b.Lock() defer b.Unlock() - if b.batchSize == 0 || len(batch) == 0 { - // nothing to accept + // Mark metrics which should be removed in the internal mask + remove := make([]int, 0, len(tx.Accept)+len(tx.Reject)) + for _, idx := range tx.Accept { + b.metricWritten(tx.Batch[idx]) + remove = append(remove, offsets[idx]) + } + for _, idx := range tx.Reject { + b.metricRejected(tx.Batch[idx]) + remove = append(remove, offsets[idx]) + } + b.mask = append(b.mask, remove...) + sort.Ints(b.mask) + + // Remove the metrics that are marked for removal from the front of the + // WAL file. All other metrics must be kept. 
+ if len(b.mask) == 0 || b.mask[0] != 0 { + // Mask is empty or the first index is not the front of the file, so + // exit early as there is nothing to remove return } - for _, m := range batch { - b.metricWritten(m) + + // Determine up to which index we can remove the entries from the WAL file + var removeIdx int + for i, offset := range b.mask { + if offset != i { + break + } + removeIdx = offset } - if b.length() == len(batch) { - b.emptyFile() + + // Remove the metrics in front from the WAL file + b.isEmpty = b.entries()-removeIdx-1 <= 0 + if b.isEmpty { + // WAL files cannot be fully empty but need to contain at least one + // item to not throw an error + if err := b.file.TruncateFront(b.writeIndex()); err != nil { + log.Printf("E! batch length: %d, first: %d, size: %d", len(tx.Batch), b.batchFirst, b.batchSize) + panic(err) + } } else { - err := b.file.TruncateFront(b.batchFirst + uint64(len(batch))) - if err != nil { - log.Printf("E! batch length: %d, batchFirst: %d, batchSize: %d", len(batch), b.batchFirst, b.batchSize) + if err := b.file.TruncateFront(b.batchFirst + uint64(removeIdx+1)); err != nil { + log.Printf("E! 
batch length: %d, first: %d, size: %d", len(tx.Batch), b.batchFirst, b.batchSize) panic(err) } } + // Truncate the mask and update the relative offsets + b.mask = b.mask[:removeIdx] + for i := range b.mask { + b.mask[i] -= removeIdx + } + // check if the original end index is still valid, clear if not if b.originalEnd < b.readIndex() { b.originalEnd = 0 @@ -203,14 +269,6 @@ func (b *DiskBuffer) Accept(batch []telegraf.Metric) { b.BufferSize.Set(int64(b.length())) } -func (b *DiskBuffer) Reject(_ []telegraf.Metric) { - // very little to do here as the disk buffer retains metrics in - // the wal file until a call to accept - b.Lock() - defer b.Unlock() - b.resetBatch() -} - func (b *DiskBuffer) Stats() BufferStats { return b.BufferStats } @@ -238,14 +296,3 @@ func (b *DiskBuffer) handleEmptyFile() { } b.isEmpty = false } - -func (b *DiskBuffer) emptyFile() { - if b.isEmpty || b.length() == 0 { - return - } - if err := b.file.TruncateFront(b.writeIndex() - 1); err != nil { - log.Printf("E! 
writeIndex: %d, buffer len: %d", b.writeIndex(), b.length()) - panic(err) - } - b.isEmpty = true -} diff --git a/models/buffer_disk_test.go b/models/buffer_disk_test.go index 3f04ef86d6246..15ff25a73c42b 100644 --- a/models/buffer_disk_test.go +++ b/models/buffer_disk_test.go @@ -27,9 +27,9 @@ func TestDiskBufferRetainsTrackingInformation(t *testing.T) { defer buf.Close() buf.Add(mm) - - batch := buf.Batch(1) - buf.Accept(batch) + tx := buf.BeginTransaction(1) + tx.AcceptAll() + buf.EndTransaction(tx) require.Equal(t, 1, delivered) } @@ -85,11 +85,11 @@ func TestDiskBufferTrackingDroppedFromOldWal(t *testing.T) { buf.Stats().MetricsDropped.Set(0) defer buf.Close() - batch := buf.Batch(4) + tx := buf.BeginTransaction(4) // Check that the tracking metric is skipped expected := []telegraf.Metric{ metrics[0], metrics[1], metrics[2], metrics[4], } - testutil.RequireMetricsEqual(t, expected, batch) + testutil.RequireMetricsEqual(t, expected, tx.Batch) } diff --git a/models/buffer_mem.go b/models/buffer_mem.go index 7bba4744f4e07..3c2daa89c51a3 100644 --- a/models/buffer_mem.go +++ b/models/buffer_mem.go @@ -51,67 +51,67 @@ func (b *MemoryBuffer) Add(metrics ...telegraf.Metric) int { return dropped } -func (b *MemoryBuffer) Batch(batchSize int) []telegraf.Metric { +func (b *MemoryBuffer) BeginTransaction(batchSize int) *Transaction { b.Lock() defer b.Unlock() outLen := min(b.size, batchSize) - out := make([]telegraf.Metric, outLen) if outLen == 0 { - return out + return &Transaction{} } b.batchFirst = b.first b.batchSize = outLen - batchIndex := b.batchFirst - for i := range out { - out[i] = b.buf[batchIndex] + batch := make([]telegraf.Metric, outLen) + for i := range batch { + batch[i] = b.buf[batchIndex] b.buf[batchIndex] = nil batchIndex = b.next(batchIndex) } b.first = b.nextby(b.first, b.batchSize) b.size -= outLen - return out + return &Transaction{Batch: batch, valid: true} } -func (b *MemoryBuffer) Accept(batch []telegraf.Metric) { +func (b *MemoryBuffer) 
EndTransaction(tx *Transaction) { b.Lock() defer b.Unlock() - for _, m := range batch { - b.metricWritten(m) - } - - b.resetBatch() - b.BufferSize.Set(int64(b.length())) -} - -func (b *MemoryBuffer) Reject(batch []telegraf.Metric) { - b.Lock() - defer b.Unlock() - - if len(batch) == 0 { + // Ignore invalid transactions and make sure they can only be finished once + if !tx.valid { return } + tx.valid = false - free := b.cap - b.size - restore := min(len(batch), free) - skip := len(batch) - restore + // Accept metrics + for _, idx := range tx.Accept { + b.metricWritten(tx.Batch[idx]) + } - b.first = b.prevby(b.first, restore) - b.size = min(b.size+restore, b.cap) + // Reject metrics + for _, idx := range tx.Reject { + b.metricRejected(tx.Batch[idx]) + } - re := b.first + // Keep metrics + keep := tx.InferKeep() + if len(keep) > 0 { + restore := min(len(keep), b.cap-b.size) + b.first = b.prevby(b.first, restore) + b.size = min(b.size+restore, b.cap) + + // Restore the metrics that fit into the buffer + current := b.first + for i := 0; i < restore; i++ { + b.buf[current] = tx.Batch[keep[i]] + current = b.next(current) + } - // Copy metrics from the batch back into the buffer - for i := range batch { - if i < skip { - b.metricDropped(batch[i]) - } else { - b.buf[re] = batch[i] - re = b.next(re) + // Drop all remaining metrics + for i := restore; i < len(keep); i++ { + b.metricDropped(tx.Batch[keep[i]]) } } diff --git a/models/buffer_mem_test.go b/models/buffer_mem_test.go index 650bd3bf65c93..8a473fcb5a0a9 100644 --- a/models/buffer_mem_test.go +++ b/models/buffer_mem_test.go @@ -24,8 +24,9 @@ func TestMemoryBufferAcceptCallsMetricAccept(t *testing.T) { }, } buf.Add(mm, mm, mm) - batch := buf.Batch(2) - buf.Accept(batch) + tx := buf.BeginTransaction(2) + tx.AcceptAll() + buf.EndTransaction(tx) require.Equal(t, 2, accept) } diff --git a/models/buffer_suite_test.go b/models/buffer_suite_test.go index 99d008096373a..80ce63bdce95b 100644 --- a/models/buffer_suite_test.go 
+++ b/models/buffer_suite_test.go @@ -53,6 +53,7 @@ func (s *BufferSuiteTest) newTestBuffer(capacity int) Buffer { s.Require().NoError(err) buf.Stats().MetricsAdded.Set(0) buf.Stats().MetricsWritten.Set(0) + buf.Stats().MetricsRejected.Set(0) buf.Stats().MetricsDropped.Set(0) return buf } @@ -99,16 +100,16 @@ func (s *BufferSuiteTest) TestBufferBatchLenZero() { buf := s.newTestBuffer(5) defer buf.Close() - batch := buf.Batch(0) - s.Empty(batch) + tx := buf.BeginTransaction(0) + s.Empty(tx.Batch) } func (s *BufferSuiteTest) TestBufferBatchLenBufferEmpty() { buf := s.newTestBuffer(5) defer buf.Close() - batch := buf.Batch(2) - s.Empty(batch) + tx := buf.BeginTransaction(2) + s.Empty(tx.Batch) } func (s *BufferSuiteTest) TestBufferBatchLenUnderfill() { @@ -117,8 +118,8 @@ func (s *BufferSuiteTest) TestBufferBatchLenUnderfill() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m) - batch := buf.Batch(2) - s.Len(batch, 1) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 1) } func (s *BufferSuiteTest) TestBufferBatchLenFill() { @@ -127,8 +128,8 @@ func (s *BufferSuiteTest) TestBufferBatchLenFill() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m) - batch := buf.Batch(2) - s.Len(batch, 2) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 2) } func (s *BufferSuiteTest) TestBufferBatchLenExact() { @@ -137,8 +138,8 @@ func (s *BufferSuiteTest) TestBufferBatchLenExact() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m) - batch := buf.Batch(2) - s.Len(batch, 2) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 2) } func (s *BufferSuiteTest) TestBufferBatchLenLargerThanBuffer() { @@ -147,8 +148,8 @@ func (s *BufferSuiteTest) TestBufferBatchLenLargerThanBuffer() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - 
batch := buf.Batch(6) - s.Len(batch, 5) + tx := buf.BeginTransaction(6) + s.Len(tx.Batch, 5) } func (s *BufferSuiteTest) TestBufferBatchWrap() { @@ -157,11 +158,12 @@ func (s *BufferSuiteTest) TestBufferBatchWrap() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(2) - buf.Accept(batch) + tx := buf.BeginTransaction(2) + tx.AcceptAll() + buf.EndTransaction(tx) buf.Add(m, m) - batch = buf.Batch(5) - s.Len(batch, 5) + tx = buf.BeginTransaction(5) + s.Len(tx.Batch, 5) } func (s *BufferSuiteTest) TestBufferBatchLatest() { @@ -171,13 +173,13 @@ func (s *BufferSuiteTest) TestBufferBatchLatest() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferBatchLatestWrap() { @@ -193,13 +195,13 @@ func (s *BufferSuiteTest) TestBufferBatchLatestWrap() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0)), 
metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferMultipleBatch() { @@ -212,7 +214,7 @@ func (s *BufferSuiteTest) TestBufferMultipleBatch() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), @@ -220,14 +222,16 @@ func (s *BufferSuiteTest) TestBufferMultipleBatch() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), - }, batch) - buf.Accept(batch) - batch = buf.Batch(5) + }, tx.Batch) + tx.AcceptAll() + buf.EndTransaction(tx) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0)), - }, batch) - buf.Accept(batch) + }, tx.Batch) + tx.AcceptAll() + buf.EndTransaction(tx) } func (s *BufferSuiteTest) TestBufferRejectWithRoom() { @@ -237,14 +241,15 @@ func (s *BufferSuiteTest) TestBufferRejectWithRoom() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := 
buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), @@ -252,7 +257,7 @@ func (s *BufferSuiteTest) TestBufferRejectWithRoom() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectNothingNewFull() { @@ -264,12 +269,13 @@ func (s *BufferSuiteTest) TestBufferRejectNothingNewFull() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) - batch := buf.Batch(2) - buf.Reject(batch) + tx := buf.BeginTransaction(2) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), @@ -277,7 +283,7 @@ func (s *BufferSuiteTest) TestBufferRejectNothingNewFull() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), metric.New("cpu", map[string]string{}, 
map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectNoRoom() { @@ -291,18 +297,19 @@ func (s *BufferSuiteTest) TestBufferRejectNoRoom() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(3), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), @@ -310,7 +317,7 @@ func (s *BufferSuiteTest) TestBufferRejectNoRoom() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectRoomExact() { @@ -319,16 +326,17 @@ func (s 
*BufferSuiteTest) TestBufferRejectRoomExact() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0)), @@ -336,7 +344,7 @@ func (s *BufferSuiteTest) TestBufferRejectRoomExact() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectRoomOverwriteOld() { @@ -350,16 +358,17 @@ func (s *BufferSuiteTest) TestBufferRejectRoomOverwriteOld() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(1) + tx := buf.BeginTransaction(1) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", 
map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(1), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0)), @@ -367,7 +376,7 @@ func (s *BufferSuiteTest) TestBufferRejectRoomOverwriteOld() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectPartialRoom() { @@ -381,16 +390,17 @@ func (s *BufferSuiteTest) TestBufferRejectPartialRoom() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(2), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) 
testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0)), @@ -398,7 +408,7 @@ func (s *BufferSuiteTest) TestBufferRejectPartialRoom() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectNewMetricsWrapped() { @@ -412,7 +422,7 @@ func (s *BufferSuiteTest) TestBufferRejectNewMetricsWrapped() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) @@ -435,11 +445,12 @@ func (s *BufferSuiteTest) TestBufferRejectNewMetricsWrapped() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(15, 0))) // buffer: 13, 14, 15, 11, 12; batch: 2, 3 s.Equal(int64(8), buf.Stats().MetricsDropped.Get()) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(10), buf.Stats().MetricsDropped.Get()) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(11, 0)), @@ -447,7 +458,7 @@ func (s *BufferSuiteTest) TestBufferRejectNewMetricsWrapped() { metric.New("cpu", map[string]string{}, 
map[string]interface{}{"value": 42.0}, time.Unix(13, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(14, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(15, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectWrapped() { @@ -467,16 +478,17 @@ func (s *BufferSuiteTest) TestBufferRejectWrapped() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0))) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(9, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(10, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(11, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(12, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) - batch = buf.Batch(5) + tx = buf.BeginTransaction(5) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0)), @@ -484,7 +496,7 @@ func (s *BufferSuiteTest) TestBufferRejectWrapped() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(10, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(11, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(12, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferRejectAdjustFirst() { @@ -498,36 +510,39 @@ func (s *BufferSuiteTest) TestBufferRejectAdjustFirst() { 
buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(4, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(5, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(6, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(7, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(8, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(9, 0))) - batch = buf.Batch(3) + tx = buf.BeginTransaction(3) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(10, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(11, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(12, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(13, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(14, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(15, 0))) - batch = buf.Batch(3) + tx = buf.BeginTransaction(3) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(16, 0))) buf.Add(metric.New("cpu", 
map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(17, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(18, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(19, 0))) - batch = buf.Batch(10) + tx = buf.BeginTransaction(10) testutil.RequireMetricsEqual(s.T(), []telegraf.Metric{ metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(10, 0)), @@ -540,7 +555,7 @@ func (s *BufferSuiteTest) TestBufferRejectAdjustFirst() { metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(17, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(18, 0)), metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(19, 0)), - }, batch) + }, tx.Batch) } func (s *BufferSuiteTest) TestBufferAddDropsOverwrittenMetrics() { @@ -565,8 +580,9 @@ func (s *BufferSuiteTest) TestBufferAcceptRemovesBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m) - batch := buf.Batch(2) - buf.Accept(batch) + tx := buf.BeginTransaction(2) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(1, buf.Len()) } @@ -576,8 +592,9 @@ func (s *BufferSuiteTest) TestBufferRejectLeavesBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m) - batch := buf.Batch(2) - buf.Reject(batch) + tx := buf.BeginTransaction(2) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(3, buf.Len()) } @@ -587,9 +604,10 @@ func (s *BufferSuiteTest) TestBufferAcceptWritesOverwrittenBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) buf.Add(m, m, m, m, m) - 
buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get()) s.Equal(int64(5), buf.Stats().MetricsWritten.Get()) @@ -605,9 +623,10 @@ func (s *BufferSuiteTest) TestBufferBatchRejectDropsOverwrittenBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) buf.Add(m, m, m, m, m) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(5), buf.Stats().MetricsDropped.Get()) s.Equal(int64(0), buf.Stats().MetricsWritten.Get()) @@ -619,9 +638,10 @@ func (s *BufferSuiteTest) TestBufferMetricsOverwriteBatchAccept() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(m, m, m) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(int64(0), buf.Stats().MetricsDropped.Get(), "dropped") s.Equal(int64(3), buf.Stats().MetricsWritten.Get(), "written") } @@ -636,9 +656,10 @@ func (s *BufferSuiteTest) TestBufferMetricsOverwriteBatchReject() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(m, m, m) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(int64(3), buf.Stats().MetricsDropped.Get()) s.Equal(int64(0), buf.Stats().MetricsWritten.Get()) } @@ -653,9 +674,10 @@ func (s *BufferSuiteTest) TestBufferMetricsBatchAcceptRemoved() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(m, m, m, m, m) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(int64(2), buf.Stats().MetricsDropped.Get()) s.Equal(int64(3), 
buf.Stats().MetricsWritten.Get()) } @@ -670,10 +692,10 @@ func (s *BufferSuiteTest) TestBufferWrapWithBatch() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m) - buf.Batch(3) + tx := buf.BeginTransaction(3) buf.Add(m, m, m, m, m, m) - s.Equal(int64(1), buf.Stats().MetricsDropped.Get()) + buf.EndTransaction(tx) } func (s *BufferSuiteTest) TestBufferBatchNotRemoved() { @@ -682,8 +704,9 @@ func (s *BufferSuiteTest) TestBufferBatchNotRemoved() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - buf.Batch(2) + tx := buf.BeginTransaction(2) s.Equal(5, buf.Len()) + buf.EndTransaction(tx) } func (s *BufferSuiteTest) TestBufferBatchRejectAcceptNoop() { @@ -692,9 +715,11 @@ func (s *BufferSuiteTest) TestBufferBatchRejectAcceptNoop() { m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) buf.Add(m, m, m, m, m) - batch := buf.Batch(2) - buf.Reject(batch) - buf.Accept(batch) + tx := buf.BeginTransaction(2) + tx.KeepAll() + buf.EndTransaction(tx) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(5, buf.Len()) } @@ -734,10 +759,11 @@ func (s *BufferSuiteTest) TestBufferAddCallsMetricRejectWhenNotInBatch() { }, } buf.Add(mm, mm, mm, mm, mm) - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(mm, mm, mm, mm) s.Equal(2, reject) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(4, reject) } @@ -757,10 +783,11 @@ func (s *BufferSuiteTest) TestBufferRejectCallsMetricRejectWithOverwritten() { }, } buf.Add(mm, mm, mm, mm, mm) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) buf.Add(mm, mm) s.Equal(0, reject) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(2, reject) } @@ -780,13 +807,14 @@ func (s *BufferSuiteTest) TestBufferAddOverwriteAndReject() { }, } buf.Add(mm, mm, mm, mm, mm) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) 
buf.Add(mm, mm, mm, mm, mm) buf.Add(mm, mm, mm, mm, mm) buf.Add(mm, mm, mm, mm, mm) buf.Add(mm, mm, mm, mm, mm) s.Equal(15, reject) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) s.Equal(20, reject) } @@ -812,7 +840,7 @@ func (s *BufferSuiteTest) TestBufferAddOverwriteAndRejectOffset() { buf.Add(mm, mm, mm) buf.Add(mm, mm, mm, mm) s.Equal(2, reject) - batch := buf.Batch(5) + tx := buf.BeginTransaction(5) buf.Add(mm, mm, mm, mm) s.Equal(2, reject) buf.Add(mm, mm, mm, mm) @@ -821,7 +849,8 @@ func (s *BufferSuiteTest) TestBufferAddOverwriteAndRejectOffset() { s.Equal(9, reject) buf.Add(mm, mm, mm, mm) s.Equal(13, reject) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(13, reject) s.Equal(5, accept) } @@ -830,14 +859,16 @@ func (s *BufferSuiteTest) TestBufferRejectEmptyBatch() { buf := s.newTestBuffer(5) defer buf.Close() - batch := buf.Batch(2) + tx := buf.BeginTransaction(2) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) - buf.Reject(batch) + tx.KeepAll() + buf.EndTransaction(tx) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) - batch = buf.Batch(2) - for _, m := range batch { + tx = buf.BeginTransaction(2) + for _, m := range tx.Batch { s.NotNil(m) } + buf.EndTransaction(tx) } func (s *BufferSuiteTest) TestBufferFlushedPartial() { @@ -847,10 +878,11 @@ func (s *BufferSuiteTest) TestBufferFlushedPartial() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(3, 0))) - batch := buf.Batch(2) - s.Len(batch, 2) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 2) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(1, buf.Len()) } @@ -860,13 +892,48 @@ func (s 
*BufferSuiteTest) TestBufferFlushedFull() { buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(1, 0))) buf.Add(metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(2, 0))) - batch := buf.Batch(2) - s.Len(batch, 2) + tx := buf.BeginTransaction(2) + s.Len(tx.Batch, 2) - buf.Accept(batch) + tx.AcceptAll() + buf.EndTransaction(tx) s.Equal(0, buf.Len()) } +func (s *BufferSuiteTest) TestPartialWriteBackToFront() { + buf := s.newTestBuffer(5) + defer buf.Close() + + m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)) + buf.Add(m, m, m, m, m) + + // Get a batch of all metrics but only reject the last one + tx := buf.BeginTransaction(5) + s.Len(tx.Batch, 5) + tx.Reject = []int{4} + buf.EndTransaction(tx) + s.Equal(4, buf.Len()) + + // Get the next batch which should miss the last metric + tx = buf.BeginTransaction(5) + s.Len(tx.Batch, 4) + tx.Accept = []int{3} + buf.EndTransaction(tx) + s.Equal(3, buf.Len()) + + // Now get the next batch and reject the remaining metrics + tx = buf.BeginTransaction(5) + s.Len(tx.Batch, 3) + tx.Accept = []int{0, 1, 2} + buf.EndTransaction(tx) + s.Equal(0, buf.Len()) + + s.Equal(int64(5), buf.Stats().MetricsAdded.Get(), "metrics added") + s.Equal(int64(4), buf.Stats().MetricsWritten.Get(), "metrics written") + s.Equal(int64(1), buf.Stats().MetricsRejected.Get(), "metrics rejected") + s.Equal(int64(0), buf.Stats().MetricsDropped.Get(), "metrics dropped") +} + type mockMetric struct { telegraf.Metric AcceptF func() diff --git a/models/running_output.go b/models/running_output.go index c8a730d572ba6..fd1622c4438de 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -301,22 +301,21 @@ func (r *RunningOutput) Write() error { atomic.StoreInt64(&r.newMetricsCount, 0) - // Only process the metrics in the buffer now. Metrics added while we are + // Only process the metrics in the buffer now. 
Metrics added while we are // writing will be sent on the next call. nBuffer := r.buffer.Len() nBatches := nBuffer/r.MetricBatchSize + 1 for i := 0; i < nBatches; i++ { - batch := r.buffer.Batch(r.MetricBatchSize) - if len(batch) == 0 { - break + tx := r.buffer.BeginTransaction(r.MetricBatchSize) + if len(tx.Batch) == 0 { + return nil } - - err := r.writeMetrics(batch) + err := r.writeMetrics(tx.Batch) + r.updateTransaction(tx, err) + r.buffer.EndTransaction(tx) if err != nil { - r.buffer.Reject(batch) return err } - r.buffer.Accept(batch) } return nil } @@ -334,19 +333,15 @@ func (r *RunningOutput) WriteBatch() error { r.log.Debugf("Successfully connected after %d attempts", r.retries) } - batch := r.buffer.Batch(r.MetricBatchSize) - if len(batch) == 0 { + tx := r.buffer.BeginTransaction(r.MetricBatchSize) + if len(tx.Batch) == 0 { return nil } + err := r.writeMetrics(tx.Batch) + r.updateTransaction(tx, err) + r.buffer.EndTransaction(tx) - err := r.writeMetrics(batch) - if err != nil { - r.buffer.Reject(batch) - return err - } - r.buffer.Accept(batch) - - return nil + return err } func (r *RunningOutput) writeMetrics(metrics []telegraf.Metric) error { @@ -367,6 +362,26 @@ func (r *RunningOutput) writeMetrics(metrics []telegraf.Metric) error { return err } +func (r *RunningOutput) updateTransaction(tx *Transaction, err error) { + // No error indicates all metrics were written successfully + if err == nil { + tx.AcceptAll() + return + } + + // A non-partial-write-error indicated none of the metrics were written + // successfully and we should keep them for the next write cycle + var writeErr *internal.PartialWriteError + if !errors.As(err, &writeErr) { + tx.KeepAll() + return + } + + // Transfer the accepted and rejected indices based on the write error values + tx.Accept = writeErr.MetricsAccept + tx.Reject = writeErr.MetricsReject +} + func (r *RunningOutput) LogBufferStatus() { nBuffer := r.buffer.Len() if r.Config.BufferStrategy == "disk" { diff --git 
a/models/running_output_test.go b/models/running_output_test.go index c045dcf0140f2..3c8b9e5951e1a 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -433,6 +433,7 @@ func TestRunningOutputInternalMetrics(t *testing.T) { "buffer_size": 0, "errors": 0, "metrics_added": 0, + "metrics_rejected": 0, "metrics_dropped": 0, "metrics_filtered": 0, "metrics_written": 0, From 64d2a868d51221f1d163f141d5d496ec799147c0 Mon Sep 17 00:00:00 2001 From: Mingyang Zheng Date: Thu, 5 Dec 2024 06:30:10 -0800 Subject: [PATCH 119/170] fix(logging): Clean up extra empty spaces when redirectLogger is used (#16255) --- logger/handler.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/logger/handler.go b/logger/handler.go index 6bc066658d183..76bf64a9c32d8 100644 --- a/logger/handler.go +++ b/logger/handler.go @@ -119,10 +119,15 @@ func (l *redirectLogger) Print(level telegraf.LogLevel, ts time.Time, prefix str for k, v := range attr { parts = append(parts, fmt.Sprintf("%s=%v", k, v)) } - attrMsg = " (" + strings.Join(parts, ",") + ")" + attrMsg = "(" + strings.Join(parts, ",") + ")" } - msg := append([]interface{}{ts.In(time.UTC).Format(time.RFC3339), " ", level.Indicator(), " ", prefix + attrMsg}, args...) + msg := []interface{}{ts.In(time.UTC).Format(time.RFC3339), level.Indicator(), prefix + attrMsg} + if prefix+attrMsg != "" { + msg = append(msg, prefix+attrMsg) + } + msg = append(msg, args...) + fmt.Fprintln(l.writer, msg...) 
} From e0ff766ceb4be5435543f96be9eac1fc9aa958c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Dec 2024 08:30:42 -0600 Subject: [PATCH 120/170] chore(deps): Bump cloud.google.com/go/storage from 1.43.0 to 1.47.0 (#16235) --- docs/LICENSE_OF_DEPENDENCIES.md | 12 +++++++++ go.mod | 19 +++++++++++--- go.sum | 45 +++++++++++++++++++++++++++------ 3 files changed, 64 insertions(+), 12 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ea7cdf17d143c..bee75fe5e96f0 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -3,6 +3,7 @@ When distributed in a binary form, Telegraf may contain portions of the following works: +- cel.dev/expr [Apache License 2.0](https://github.com/google/cel-spec/blob/master/LICENSE) - cloud.google.com/go [Apache License 2.0](https://github.com/googleapis/google-cloud-go/blob/master/LICENSE) - code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE) - collectd.org [ISC License](https://github.com/collectd/go-collectd/blob/master/LICENSE) @@ -27,6 +28,9 @@ following works: - github.com/Azure/go-ntlmssp [MIT License](https://github.com/Azure/go-ntlmssp/blob/master/LICENSE) - github.com/AzureAD/microsoft-authentication-library-for-go [MIT License](https://github.com/AzureAD/microsoft-authentication-library-for-go/blob/main/LICENSE) - github.com/ClickHouse/clickhouse-go [MIT License](https://github.com/ClickHouse/clickhouse-go/blob/master/LICENSE) +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp [Apache License 2.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE) +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric [Apache License 2.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE) +- 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping [Apache License 2.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE) - github.com/IBM/nzgo [MIT License](https://github.com/IBM/nzgo/blob/master/LICENSE.md) - github.com/IBM/sarama [MIT License](https://github.com/IBM/sarama/blob/master/LICENSE.md) - github.com/JohnCGriffin/overflow [MIT License](https://github.com/JohnCGriffin/overflow/blob/master/README.md) @@ -97,10 +101,12 @@ following works: - github.com/bwmarrin/snowflake [BSD 2-Clause "Simplified" License](https://github.com/bwmarrin/snowflake/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) +- github.com/census-instrumentation/opencensus-proto [Apache License 2.0](https://github.com/census-instrumentation/opencensus-proto/blob/master/LICENSE) - github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/clarify/clarify-go [Apache License 2.0](https://github.com/clarify/clarify-go/blob/master/LICENSE) - github.com/cloudevents/sdk-go [Apache License 2.0](https://github.com/cloudevents/sdk-go/blob/main/LICENSE) +- github.com/cncf/xds/go [Apache License 2.0](https://github.com/cncf/xds/blob/main/LICENSE) - github.com/compose-spec/compose-go [Apache License 2.0](https://github.com/compose-spec/compose-go/blob/master/LICENSE) - github.com/containerd/log [Apache License 2.0](https://github.com/containerd/log/blob/main/LICENSE) - github.com/containerd/platforms [Apache License 2.0](https://github.com/containerd/platforms/blob/main/LICENSE) @@ -134,6 +140,8 @@ following works: - github.com/eclipse/paho.golang [Eclipse Public License - v 
2.0](https://github.com/eclipse/paho.golang/blob/master/LICENSE) - github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 2.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) - github.com/emicklei/go-restful [MIT License](https://github.com/emicklei/go-restful/blob/v3/LICENSE) +- github.com/envoyproxy/go-control-plane [Apache License 2.0](https://github.com/envoyproxy/go-control-plane/blob/main/LICENSE) +- github.com/envoyproxy/protoc-gen-validate [Apache License 2.0](https://github.com/bufbuild/protoc-gen-validate/blob/main/LICENSE) - github.com/facebook/time [Apache License 2.0](https://github.com/facebook/time/blob/main/LICENSE) - github.com/fatih/color [MIT License](https://github.com/fatih/color/blob/master/LICENSE.md) - github.com/felixge/httpsnoop [MIT License](https://github.com/felixge/httpsnoop/blob/master/LICENSE.txt) @@ -398,10 +406,13 @@ following works: - go.opentelemetry.io/collector/consumer [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) - go.opentelemetry.io/collector/pdata [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) - go.opentelemetry.io/collector/semconv [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) +- go.opentelemetry.io/contrib/detectors/gcp [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/LICENSE) - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/LICENSE) - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/LICENSE) - go.opentelemetry.io/otel [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) - go.opentelemetry.io/otel/metric [Apache License 
2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) +- go.opentelemetry.io/otel/sdk [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) +- go.opentelemetry.io/otel/sdk/metric [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) - go.opentelemetry.io/otel/trace [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-go/blob/main/LICENSE) - go.opentelemetry.io/proto/otlp [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-proto-go/blob/main/LICENSE) - go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE) @@ -427,6 +438,7 @@ following works: - google.golang.org/genproto/googleapis/api [Apache License 2.0](https://pkg.go.dev/google.golang.org/genproto/googleapis/api?tab=licenses) - google.golang.org/genproto/googleapis/rpc [Apache License 2.0](https://pkg.go.dev/google.golang.org/genproto/googleapis/rpc?tab=licenses) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) +- google.golang.org/grpc/stats/opentelemetry [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) - google.golang.org/protobuf [BSD 3-Clause "New" or "Revised" License](https://pkg.go.dev/google.golang.org/protobuf?tab=licenses) - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) diff --git a/go.mod b/go.mod index 54ba5fdb75b72..66bab6f6210f7 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go/bigquery v1.64.0 cloud.google.com/go/monitoring v1.21.1 cloud.google.com/go/pubsub v1.45.1 - cloud.google.com/go/storage v1.43.0 + cloud.google.com/go/storage v1.47.0 collectd.org v0.6.0 github.com/99designs/keyring v1.2.2 github.com/Azure/azure-event-hubs-go/v3 v3.6.2 @@ 
-209,7 +209,7 @@ require ( go.mongodb.org/mongo-driver v1.17.0 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 - go.opentelemetry.io/otel/sdk/metric v1.27.0 + go.opentelemetry.io/otel/sdk/metric v1.29.0 go.opentelemetry.io/proto/otlp v1.3.1 go.starlark.net v0.0.0-20240925182052-1207426daebd go.step.sm/crypto v0.54.0 @@ -239,9 +239,10 @@ require ( ) require ( + cel.dev/expr v0.16.1 // indirect cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.1 // indirect code.cloudfoundry.org/clock v1.0.0 // indirect @@ -265,6 +266,9 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect @@ -303,8 +307,10 @@ require ( github.com/caio/go-tdigest/v4 v4.0.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect + github.com/cncf/xds/go 
v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/couchbase/gomemcached v0.1.3 // indirect @@ -324,6 +330,8 @@ require ( github.com/ebitengine/purego v0.8.1 // indirect github.com/echlebek/timeproxy v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.4 // indirect @@ -447,6 +455,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pkg/sftp v1.13.6 // indirect github.com/pkg/xattr v0.4.10 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect @@ -495,6 +504,7 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/consumer v0.101.0 // indirect go.opentelemetry.io/collector/semconv v0.105.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect go.opentelemetry.io/otel v1.30.0 // indirect @@ -511,6 +521,7 @@ require ( golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 // indirect google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect gopkg.in/fatih/pool.v2 v2.0.0 // indirect 
gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 87fe3b6f0fced..8b87a9e19d693 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -99,10 +101,10 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth v0.10.2 h1:oKF7rgBfSHdp/kuhXtqU/tNDr0mZqhYbEh+6SiqzkKo= +cloud.google.com/go/auth v0.10.2/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -355,6 +357,8 @@ 
cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6 cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= +cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= @@ -550,8 +554,8 @@ cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeL cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= -cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +cloud.google.com/go/storage v1.47.0 h1:ajqgt30fnOMmLfWfu1PWcb+V9Dxz6n+9WKjdNg5R4HM= +cloud.google.com/go/storage v1.47.0/go.mod h1:Ks0vP374w0PW6jOUameJbapbQKXqkjGd/OJRp2fb9IQ= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -571,6 +575,8 @@ cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg cloud.google.com/go/trace v1.4.0/go.mod 
h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= +cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -719,6 +725,14 @@ github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Files-com/files-sdk-go/v3 v3.2.34 h1:j6gSzu6BF1wWH1z4itRe7eKhQSCrx/I78SDNiBBUtvI= github.com/Files-com/files-sdk-go/v3 v3.2.34/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 
h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 h1:TBb4IxmBH0ssmWTUg0C6c9ZnfDmZospTF8f+YbHnbbA= @@ -1000,6 +1014,7 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -1042,6 +1057,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= 
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/colinmarc/hdfs/v2 v2.4.0 h1:v6R8oBx/Wu9fHpdPoJJjpGSUxo8NhHIwrwsfhFvU9W0= @@ -1164,10 +1181,14 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc h1:0VQsg5ZXW9MPUxzemUHW7UBK8gfIO8K+YJGbdv4kBIM= github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc/go.mod h1:2UFAomOuD2vAK1x68czUtCVjAqmyWCEnAXOlmGqf+G0= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -2095,6 +2116,8 @@ github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp 
v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -2452,6 +2475,8 @@ go.opentelemetry.io/collector/pdata/testdata v0.101.0 h1:JzeUtg5RN1iIFgY8DakGlqB go.opentelemetry.io/collector/pdata/testdata v0.101.0/go.mod h1:ZGobfCus4fWo5RduZ7ENI0+HD9BewgKuO6qU2rBVnUg= go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= @@ -2464,12 +2489,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9RO go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod 
h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -3296,6 +3323,8 @@ google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpX google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/stats/opentelemetry 
v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 4983e07f100b0b73a2c6c955e15e0545922eef89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 5 Dec 2024 15:31:11 +0100 Subject: [PATCH 121/170] chore(deps): Bump golangci-lint from v1.62.0 to v1.62.2 (#16250) --- .circleci/config.yml | 6 +++--- .golangci.yml | 9 ++++----- Makefile | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9ba7e9c4fed15..20568a9c4284f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -106,7 +106,7 @@ jobs: - run: 'make check-deps' - run: name: "Install golangci-lint" - command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.0 + command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2 - run: name: "golangci-lint/Linux" # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number @@ -120,7 +120,7 @@ jobs: - check-changed-files-or-halt - run: name: "Install golangci-lint" - command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.0 + command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2 - run: name: "golangci-lint/macOS" # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number @@ -134,7 +134,7 @@ jobs: - check-changed-files-or-halt - run: name: "Install golangci-lint" - command: go install 
github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.0 + command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2 - run: name: "golangci-lint/Windows" # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number diff --git a/.golangci.yml b/.golangci.yml index 9821917770265..a7eab4390f758 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -284,10 +284,9 @@ linters-settings: - name: import-shadowing - name: increment-decrement - name: indent-error-flow -# Enable again when https://github.com/mgechev/revive/issues/1103 is fixed -# - name: max-public-structs -# exclude: [ "TEST" ] -# arguments: [ 5 ] + - name: max-public-structs + exclude: [ "TEST" ] + arguments: [ 5 ] - name: modifies-parameter - name: modifies-value-receiver - name: optimize-operands-order @@ -392,7 +391,7 @@ issues: text: "Use of weak random number generator" #gosec:G404 - path-except: ^plugins/(aggregators|inputs|outputs|parsers|processors|serializers)/... - text: "max-public-structs: you have exceeded the maximum number of public struct declarations" #revive:max-public-structs + text: "max-public-structs: you have exceeded the maximum number" #revive:max-public-structs # Independently of option `exclude` we use default exclude patterns, # it can be disabled by this option. 
diff --git a/Makefile b/Makefile index 543bd9ae2f537..71ac0a668c316 100644 --- a/Makefile +++ b/Makefile @@ -180,7 +180,7 @@ vet: .PHONY: lint-install lint-install: @echo "Installing golangci-lint" - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.0 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2 @echo "Installing markdownlint" npm install -g markdownlint-cli From b864c8ceeb096b0eca4212ca0da1b82750c9f544 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 5 Dec 2024 15:32:10 +0100 Subject: [PATCH 122/170] chore(serializers)!: Remove old-style creation (#15971) --- CHANGELOG.md | 4 + config/config.go | 9 - config/config_test.go | 125 -------------- models/running_serializer.go | 5 +- plugins/outputs/amqp/amqp.go | 5 +- .../azure_data_explorer.go | 3 +- plugins/outputs/cloud_pubsub/cloud_pubsub.go | 5 +- plugins/outputs/event_hubs/event_hubs.go | 5 +- plugins/outputs/exec/exec.go | 5 +- plugins/outputs/execd/execd.go | 5 +- plugins/outputs/file/file.go | 5 +- plugins/outputs/http/http.go | 5 +- plugins/outputs/http/http_test.go | 3 +- plugins/outputs/kafka/kafka.go | 5 +- plugins/outputs/kinesis/kinesis.go | 5 +- plugins/outputs/kinesis/kinesis_test.go | 13 +- plugins/outputs/mqtt/mqtt.go | 5 +- plugins/outputs/nats/nats.go | 5 +- plugins/outputs/nsq/nsq.go | 5 +- .../outputs/socket_writer/socket_writer.go | 9 +- .../socket_writer/socket_writer_test.go | 12 +- plugins/outputs/stomp/stomp.go | 5 +- plugins/outputs/sumologic/sumologic.go | 7 +- plugins/outputs/websocket/websocket.go | 5 +- plugins/processors/execd/execd.go | 3 +- plugins/serializers/binary/binary.go | 2 +- plugins/serializers/carbon2/carbon2.go | 10 +- .../serializers/cloudevents/cloudevents.go | 2 +- plugins/serializers/csv/csv.go | 12 +- plugins/serializers/graphite/graphite.go | 14 +- plugins/serializers/influx/influx.go | 11 +- plugins/serializers/json/json.go | 13 +- plugins/serializers/msgpack/msgpack.go | 
7 +- plugins/serializers/nowmetric/nowmetric.go | 7 +- plugins/serializers/prometheus/prometheus.go | 12 +- .../prometheusremotewrite.go | 10 +- plugins/serializers/registry.go | 163 +----------------- .../serializers/splunkmetric/splunkmetric.go | 11 +- plugins/serializers/template/template.go | 2 +- plugins/serializers/wavefront/wavefront.go | 12 +- 40 files changed, 68 insertions(+), 483 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 32648b4f53bb4..ceae796d6ef68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,10 @@ delivery state update of un-parseable messages from `ACK` to `NACK` without requeueing. This way, those messages are not lost and can optionally be handled using a dead-letter exchange by other means. +- Removal of old-style serializer creation. This should not directly affect + users as it is an API change. All serializers in Telegraf are already ported + to the new framework. If you experience any issues with not being able to + create serializers let us know! 
### Bugfixes diff --git a/config/config.go b/config/config.go index d80033a6050f1..3ae2025313b4c 100644 --- a/config/config.go +++ b/config/config.go @@ -1212,15 +1212,6 @@ func (c *Config) addOutput(name string, table *ast.Table) error { return err } t.SetSerializer(serializer) - } else if t, ok := output.(serializers.SerializerOutput); ok { - // Keep the old interface for backward compatibility - // DEPRECATED: Please switch your plugin to telegraf.Serializers - missThreshold = 1 - serializer, err := c.addSerializer(name, table) - if err != nil { - return err - } - t.SetSerializer(serializer) } if t, ok := output.(telegraf.SerializerFuncPlugin); ok { diff --git a/config/config_test.go b/config/config_test.go index 7af2a790a5772..ace97545d0bf0 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -630,7 +630,6 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { require.NoError(t, c.LoadConfig("./testdata/serializers_new.toml")) require.Len(t, c.Outputs, len(formats)) - cfg := serializers.Config{} override := map[string]struct { param map[string]interface{} mask []string @@ -638,20 +637,12 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { expected := make([]telegraf.Serializer, 0, len(formats)) for _, format := range formats { - formatCfg := &cfg - formatCfg.DataFormat = format - logger := logging.New("serializers", format, "test") var serializer telegraf.Serializer if creator, found := serializers.Serializers[format]; found { t.Logf("new-style %q", format) serializer = creator() - } else { - t.Logf("old-style %q", format) - var err error - serializer, err = serializers.NewSerializer(formatCfg) - require.NoErrorf(t, err, "No serializer for format %q", format) } if settings, found := override[format]; found { @@ -703,98 +694,6 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { } } -func TestConfig_SerializerInterfaceOldFormat(t *testing.T) { - formats := []string{ - "carbon2", - "csv", - "graphite", - "influx", - 
"json", - "msgpack", - "nowmetric", - "prometheus", - "prometheusremotewrite", - "splunkmetric", - "wavefront", - } - - c := config.NewConfig() - require.NoError(t, c.LoadConfig("./testdata/serializers_old.toml")) - require.Len(t, c.Outputs, len(formats)) - - cfg := serializers.Config{} - override := map[string]struct { - param map[string]interface{} - mask []string - }{} - - expected := make([]telegraf.Serializer, 0, len(formats)) - for _, format := range formats { - formatCfg := &cfg - formatCfg.DataFormat = format - - logger := logging.New("serializers", format, "test") - - var serializer serializers.Serializer - if creator, found := serializers.Serializers[format]; found { - t.Logf("new-style %q", format) - serializer = creator() - } else { - t.Logf("old-style %q", format) - var err error - serializer, err = serializers.NewSerializer(formatCfg) - require.NoErrorf(t, err, "No serializer for format %q", format) - } - - if settings, found := override[format]; found { - s := reflect.Indirect(reflect.ValueOf(serializer)) - for key, value := range settings.param { - v := reflect.ValueOf(value) - s.FieldByName(key).Set(v) - } - } - models.SetLoggerOnPlugin(serializer, logger) - if s, ok := serializer.(telegraf.Initializer); ok { - require.NoError(t, s.Init()) - } - expected = append(expected, serializer) - } - require.Len(t, expected, len(formats)) - - actual := make([]interface{}, 0) - for _, plugin := range c.Outputs { - output, ok := plugin.Output.(*MockupOutputPluginSerializerOld) - require.True(t, ok) - // Get the parser set with 'SetParser()' - if p, ok := output.Serializer.(*models.RunningSerializer); ok { - actual = append(actual, p.Serializer) - } else { - actual = append(actual, output.Serializer) - } - } - require.Len(t, actual, len(formats)) - - for i, format := range formats { - // Determine the underlying type of the serializer - stype := reflect.Indirect(reflect.ValueOf(expected[i])).Interface() - // Ignore all unexported fields and fields not relevant 
for functionality - options := []cmp.Option{ - cmpopts.IgnoreUnexported(stype), - cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(serializers_prometheus.MetricTypes{})).Interface()), - cmpopts.IgnoreTypes(sync.Mutex{}, regexp.Regexp{}), - cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}), - } - if settings, found := override[format]; found { - options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) - } - - // Do a manual comparison as require.EqualValues will also work on unexported fields - // that cannot be cleared or ignored. - diff := cmp.Diff(expected[i], actual[i], options...) - require.Emptyf(t, diff, "Difference in SetSerializer() for %q", format) - } -} - func TestConfig_ParserInterface(t *testing.T) { formats := []string{ "collectd", @@ -1503,27 +1402,6 @@ func (m *MockupOutputPlugin) Write(_ []telegraf.Metric) error { return nil } -// Mockup OUTPUT plugin for serializer testing to avoid cyclic dependencies -type MockupOutputPluginSerializerOld struct { - Serializer serializers.Serializer -} - -func (m *MockupOutputPluginSerializerOld) SetSerializer(s serializers.Serializer) { - m.Serializer = s -} -func (*MockupOutputPluginSerializerOld) Connect() error { - return nil -} -func (*MockupOutputPluginSerializerOld) Close() error { - return nil -} -func (*MockupOutputPluginSerializerOld) SampleConfig() string { - return "Mockup test output plugin" -} -func (*MockupOutputPluginSerializerOld) Write(_ []telegraf.Metric) error { - return nil -} - type MockupOutputPluginSerializerNew struct { Serializer telegraf.Serializer } @@ -1662,7 +1540,4 @@ func init() { outputs.Add("serializer_test_new", func() telegraf.Output { return &MockupOutputPluginSerializerNew{} }) - outputs.Add("serializer_test_old", func() telegraf.Output { - return &MockupOutputPluginSerializerOld{} - }) } diff --git a/models/running_serializer.go b/models/running_serializer.go index d7d0217953363..e2efa0092c114 100644 --- a/models/running_serializer.go +++ 
b/models/running_serializer.go @@ -5,7 +5,6 @@ import ( "github.com/influxdata/telegraf" logging "github.com/influxdata/telegraf/logger" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/selfstat" ) @@ -19,7 +18,7 @@ type SerializerConfig struct { } type RunningSerializer struct { - Serializer serializers.Serializer + Serializer telegraf.Serializer Config *SerializerConfig log telegraf.Logger @@ -28,7 +27,7 @@ type RunningSerializer struct { SerializationTime selfstat.Stat } -func NewRunningSerializer(serializer serializers.Serializer, config *SerializerConfig) *RunningSerializer { +func NewRunningSerializer(serializer telegraf.Serializer, config *SerializerConfig) *RunningSerializer { tags := map[string]string{"type": config.DataFormat} if config.Alias != "" { tags["alias"] = config.Alias diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index b41fe32a216a9..4270027bfa2c7 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -17,7 +17,6 @@ import ( "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -67,7 +66,7 @@ type AMQP struct { tls.ClientConfig proxy.TCPProxy - serializer serializers.Serializer + serializer telegraf.Serializer connect func(*ClientConfig) (Client, error) client Client config *ClientConfig @@ -84,7 +83,7 @@ func (*AMQP) SampleConfig() string { return sampleConfig } -func (q *AMQP) SetSerializer(serializer serializers.Serializer) { +func (q *AMQP) SetSerializer(serializer telegraf.Serializer) { q.serializer = serializer } diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go index c89e56f9ca6e8..1c6cf4f1e8417 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer.go +++ 
b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -20,7 +20,6 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/json" ) @@ -36,7 +35,7 @@ type AzureDataExplorer struct { TableName string `toml:"table_name"` CreateTables bool `toml:"create_tables"` IngestionType string `toml:"ingestion_type"` - serializer serializers.Serializer + serializer telegraf.Serializer kustoClient *kusto.Client metricIngestors map[string]ingest.Ingestor } diff --git a/plugins/outputs/cloud_pubsub/cloud_pubsub.go b/plugins/outputs/cloud_pubsub/cloud_pubsub.go index 259ff37b6e77b..263a57f9826e8 100644 --- a/plugins/outputs/cloud_pubsub/cloud_pubsub.go +++ b/plugins/outputs/cloud_pubsub/cloud_pubsub.go @@ -18,7 +18,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -45,7 +44,7 @@ type PubSub struct { stubTopic func(id string) topic - serializer serializers.Serializer + serializer telegraf.Serializer publishResults []publishResult encoder internal.ContentEncoder } @@ -54,7 +53,7 @@ func (*PubSub) SampleConfig() string { return sampleConfig } -func (ps *PubSub) SetSerializer(serializer serializers.Serializer) { +func (ps *PubSub) SetSerializer(serializer telegraf.Serializer) { ps.serializer = serializer } diff --git a/plugins/outputs/event_hubs/event_hubs.go b/plugins/outputs/event_hubs/event_hubs.go index f7e0695ef7ab2..a5d0b6861d367 100644 --- a/plugins/outputs/event_hubs/event_hubs.go +++ b/plugins/outputs/event_hubs/event_hubs.go @@ -11,7 +11,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" - 
"github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -62,7 +61,7 @@ type EventHubs struct { Hub EventHubInterface batchOptions []eventhub.BatchOption - serializer serializers.Serializer + serializer telegraf.Serializer } const ( @@ -104,7 +103,7 @@ func (e *EventHubs) Close() error { return nil } -func (e *EventHubs) SetSerializer(serializer serializers.Serializer) { +func (e *EventHubs) SetSerializer(serializer telegraf.Serializer) { e.serializer = serializer } diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 08fed06e8b9ad..350c7357c5157 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -16,7 +16,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -33,7 +32,7 @@ type Exec struct { Log telegraf.Logger `toml:"-"` runner Runner - serializer serializers.Serializer + serializer telegraf.Serializer } func (*Exec) SampleConfig() string { @@ -47,7 +46,7 @@ func (e *Exec) Init() error { } // SetSerializer sets the serializer for the output. 
-func (e *Exec) SetSerializer(serializer serializers.Serializer) { +func (e *Exec) SetSerializer(serializer telegraf.Serializer) { e.serializer = serializer } diff --git a/plugins/outputs/execd/execd.go b/plugins/outputs/execd/execd.go index f764d142e4625..f76aaee76e8fa 100644 --- a/plugins/outputs/execd/execd.go +++ b/plugins/outputs/execd/execd.go @@ -14,7 +14,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/process" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -29,14 +28,14 @@ type Execd struct { Log telegraf.Logger process *process.Process - serializer serializers.Serializer + serializer telegraf.Serializer } func (*Execd) SampleConfig() string { return sampleConfig } -func (e *Execd) SetSerializer(s serializers.Serializer) { +func (e *Execd) SetSerializer(s telegraf.Serializer) { e.serializer = s } diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index b005026c97a12..8987a5441d397 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -13,7 +13,6 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -32,14 +31,14 @@ type File struct { encoder internal.ContentEncoder writer io.Writer closers []io.Closer - serializer serializers.Serializer + serializer telegraf.Serializer } func (*File) SampleConfig() string { return sampleConfig } -func (f *File) SetSerializer(serializer serializers.Serializer) { +func (f *File) SetSerializer(serializer telegraf.Serializer) { f.serializer = serializer } diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 3063535135f99..59d6f3d06f819 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -25,7 +25,6 @@ import ( common_aws 
"github.com/influxdata/telegraf/plugins/common/aws" common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -56,7 +55,7 @@ type HTTP struct { Log telegraf.Logger `toml:"-"` client *http.Client - serializer serializers.Serializer + serializer telegraf.Serializer awsCfg *aws.Config common_aws.CredentialConfig @@ -70,7 +69,7 @@ func (*HTTP) SampleConfig() string { return sampleConfig } -func (h *HTTP) SetSerializer(serializer serializers.Serializer) { +func (h *HTTP) SetSerializer(serializer telegraf.Serializer) { h.serializer = serializer } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 52ca1cd29a7b7..0bba3b23dd099 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -21,7 +21,6 @@ import ( common_aws "github.com/influxdata/telegraf/plugins/common/aws" common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/common/oauth" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" "github.com/influxdata/telegraf/testutil" @@ -735,7 +734,7 @@ func TestBatchedUnbatched(t *testing.T) { jsonSerializer := &json.Serializer{} require.NoError(t, jsonSerializer.Init()) - s := map[string]serializers.Serializer{ + s := map[string]telegraf.Serializer{ "influx": influxSerializer, "json": jsonSerializer, } diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 13e3de3b831ea..cec722cd1584e 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -16,7 +16,6 @@ import ( "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/outputs" - 
"github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -56,7 +55,7 @@ type Kafka struct { producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) producer sarama.SyncProducer - serializer serializers.Serializer + serializer telegraf.Serializer } type TopicSuffix struct { @@ -114,7 +113,7 @@ func (k *Kafka) GetTopicName(metric telegraf.Metric) (telegraf.Metric, string) { return metric, topicName } -func (k *Kafka) SetSerializer(serializer serializers.Serializer) { +func (k *Kafka) SetSerializer(serializer telegraf.Serializer) { k.serializer = serializer } diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index b8306efea9519..73c38e8a8b9b3 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -14,7 +14,6 @@ import ( "github.com/influxdata/telegraf" common_aws "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -32,7 +31,7 @@ type ( Debug bool `toml:"debug"` Log telegraf.Logger `toml:"-"` - serializer serializers.Serializer + serializer telegraf.Serializer svc kinesisClient common_aws.CredentialConfig @@ -86,7 +85,7 @@ func (k *KinesisOutput) Close() error { return nil } -func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) { +func (k *KinesisOutput) SetSerializer(serializer telegraf.Serializer) { k.serializer = serializer } diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 00f7f73c655ea..acc15d6734492 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" 
"github.com/influxdata/telegraf/testutil" ) @@ -566,11 +565,7 @@ func (m *mockKinesisPutRecords) AssertRequests( } } -func createTestMetric( - t *testing.T, - name string, - serializer serializers.Serializer, -) (telegraf.Metric, []byte) { +func createTestMetric(t *testing.T, name string, serializer telegraf.Serializer) (telegraf.Metric, []byte) { metric := testutil.TestMetric(1, name) data, err := serializer.Serialize(metric) @@ -579,11 +574,7 @@ func createTestMetric( return metric, data } -func createTestMetrics( - t *testing.T, - count uint32, - serializer serializers.Serializer, -) ([]telegraf.Metric, [][]byte) { +func createTestMetrics(t *testing.T, count uint32, serializer telegraf.Serializer) ([]telegraf.Metric, [][]byte) { metrics := make([]telegraf.Metric, 0, count) metricsData := make([][]byte, 0, count) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index df57c4414191d..9e710c8468184 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -14,7 +14,6 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/mqtt" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -36,7 +35,7 @@ type MQTT struct { mqtt.MqttConfig client mqtt.Client - serializer serializers.Serializer + serializer telegraf.Serializer generator *TopicNameGenerator homieDeviceNameGenerator *HomieGenerator @@ -118,7 +117,7 @@ func (m *MQTT) Connect() error { return err } -func (m *MQTT) SetSerializer(serializer serializers.Serializer) { +func (m *MQTT) SetSerializer(serializer telegraf.Serializer) { m.serializer = serializer } diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index ace7901fd6766..d00598d752115 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -17,7 +17,6 @@ import ( "github.com/influxdata/telegraf/internal/choice" 
"github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -39,7 +38,7 @@ type NATS struct { conn *nats.Conn jetstreamClient jetstream.JetStream jetstreamStreamConfig *jetstream.StreamConfig - serializer serializers.Serializer + serializer telegraf.Serializer } // StreamConfig is the configuration for creating stream @@ -83,7 +82,7 @@ func (*NATS) SampleConfig() string { return sampleConfig } -func (n *NATS) SetSerializer(serializer serializers.Serializer) { +func (n *NATS) SetSerializer(serializer telegraf.Serializer) { n.serializer = serializer } diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index 2c771b1919507..ee66d7f4e0853 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -9,7 +9,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -21,14 +20,14 @@ type NSQ struct { Log telegraf.Logger `toml:"-"` producer *nsq.Producer - serializer serializers.Serializer + serializer telegraf.Serializer } func (*NSQ) SampleConfig() string { return sampleConfig } -func (n *NSQ) SetSerializer(serializer serializers.Serializer) { +func (n *NSQ) SetSerializer(serializer telegraf.Serializer) { n.serializer = serializer } diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index b2aec04b97bc6..26e7ce60dd50f 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -19,7 +19,6 @@ import ( "github.com/influxdata/telegraf/internal" common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -32,7 +31,7 @@ type SocketWriter struct { 
common_tls.ClientConfig Log telegraf.Logger `toml:"-"` - serializers.Serializer + serializer telegraf.Serializer encoder internal.ContentEncoder @@ -43,8 +42,8 @@ func (*SocketWriter) SampleConfig() string { return sampleConfig } -func (sw *SocketWriter) SetSerializer(s serializers.Serializer) { - sw.Serializer = s +func (sw *SocketWriter) SetSerializer(s telegraf.Serializer) { + sw.serializer = s } func (sw *SocketWriter) Connect() error { @@ -141,7 +140,7 @@ func (sw *SocketWriter) Write(metrics []telegraf.Metric) error { } for _, m := range metrics { - bs, err := sw.Serialize(m) + bs, err := sw.serializer.Serialize(m) if err != nil { sw.Log.Debugf("Could not serialize metric: %v", err) continue diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 478d05a31ac78..c2de7a62f9709 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -19,7 +19,7 @@ func newSocketWriter(t *testing.T, addr string) *SocketWriter { require.NoError(t, serializer.Init()) return &SocketWriter{ Address: addr, - Serializer: serializer, + serializer: serializer, } } @@ -79,12 +79,12 @@ func TestSocketWriter_unixgram(t *testing.T) { func testSocketWriterStream(t *testing.T, sw *SocketWriter, lconn net.Conn) { metrics := []telegraf.Metric{testutil.TestMetric(1, "test")} - mbs1out, err := sw.Serialize(metrics[0]) + mbs1out, err := sw.serializer.Serialize(metrics[0]) require.NoError(t, err) mbs1out, err = sw.encoder.Encode(mbs1out) require.NoError(t, err) metrics = append(metrics, testutil.TestMetric(2, "test")) - mbs2out, err := sw.Serialize(metrics[1]) + mbs2out, err := sw.serializer.Serialize(metrics[1]) require.NoError(t, err) mbs2out, err = sw.encoder.Encode(mbs2out) require.NoError(t, err) @@ -104,13 +104,13 @@ func testSocketWriterStream(t *testing.T, sw *SocketWriter, lconn net.Conn) { func testSocketWriterPacket(t *testing.T, sw *SocketWriter, 
lconn net.PacketConn) { metrics := []telegraf.Metric{testutil.TestMetric(1, "test")} - mbs1out, err := sw.Serialize(metrics[0]) + mbs1out, err := sw.serializer.Serialize(metrics[0]) require.NoError(t, err) mbs1out, err = sw.encoder.Encode(mbs1out) require.NoError(t, err) mbs1str := string(mbs1out) metrics = append(metrics, testutil.TestMetric(2, "test")) - mbs2out, err := sw.Serialize(metrics[1]) + mbs2out, err := sw.serializer.Serialize(metrics[1]) require.NoError(t, err) mbs2out, err = sw.encoder.Encode(mbs2out) require.NoError(t, err) @@ -191,7 +191,7 @@ func TestSocketWriter_Write_reconnect(t *testing.T) { wg.Wait() require.NoError(t, lerr) - mbsout, err := sw.Serialize(metrics[0]) + mbsout, err := sw.serializer.Serialize(metrics[0]) require.NoError(t, err) buf := make([]byte, 256) n, err := lconn.Read(buf) diff --git a/plugins/outputs/stomp/stomp.go b/plugins/outputs/stomp/stomp.go index 7e12762d0b600..4433abee8c678 100644 --- a/plugins/outputs/stomp/stomp.go +++ b/plugins/outputs/stomp/stomp.go @@ -14,7 +14,6 @@ import ( "github.com/influxdata/telegraf/config" common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -35,7 +34,7 @@ type STOMP struct { conn net.Conn stomp *stomp.Conn - serialize serializers.Serializer + serialize telegraf.Serializer } func (q *STOMP) Connect() error { @@ -71,7 +70,7 @@ func (q *STOMP) Connect() error { return nil } -func (q *STOMP) SetSerializer(serializer serializers.Serializer) { +func (q *STOMP) SetSerializer(serializer telegraf.Serializer) { q.serialize = serializer } diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index 37e4798da7e35..a01033fb39c8b 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -15,7 +15,6 @@ import ( "github.com/influxdata/telegraf/internal" 
"github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/carbon2" "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/prometheus" @@ -57,7 +56,7 @@ type SumoLogic struct { Log telegraf.Logger `toml:"-"` client *http.Client - serializer serializers.Serializer + serializer telegraf.Serializer headers map[string]string } @@ -66,7 +65,7 @@ func (*SumoLogic) SampleConfig() string { return sampleConfig } -func (s *SumoLogic) SetSerializer(serializer serializers.Serializer) { +func (s *SumoLogic) SetSerializer(serializer telegraf.Serializer) { s.serializer = serializer } @@ -82,7 +81,7 @@ func (s *SumoLogic) createClient() *http.Client { func (s *SumoLogic) Connect() error { s.headers = make(map[string]string) - var serializer serializers.Serializer + var serializer telegraf.Serializer if unwrapped, ok := s.serializer.(*models.RunningSerializer); ok { serializer = unwrapped.Serializer } else { diff --git a/plugins/outputs/websocket/websocket.go b/plugins/outputs/websocket/websocket.go index f772c159c0e05..c8767c592100b 100644 --- a/plugins/outputs/websocket/websocket.go +++ b/plugins/outputs/websocket/websocket.go @@ -16,7 +16,6 @@ import ( "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -42,7 +41,7 @@ type WebSocket struct { tls.ClientConfig conn *ws.Conn - serializer serializers.Serializer + serializer telegraf.Serializer } func (*WebSocket) SampleConfig() string { @@ -50,7 +49,7 @@ func (*WebSocket) SampleConfig() string { } // SetSerializer implements serializers.SerializerOutput. 
-func (w *WebSocket) SetSerializer(serializer serializers.Serializer) { +func (w *WebSocket) SetSerializer(serializer telegraf.Serializer) { w.serializer = serializer } diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go index 0407226f9fdda..481ffd35f0db8 100644 --- a/plugins/processors/execd/execd.go +++ b/plugins/processors/execd/execd.go @@ -15,7 +15,6 @@ import ( "github.com/influxdata/telegraf/internal/process" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/processors" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -28,7 +27,7 @@ type Execd struct { Log telegraf.Logger parser telegraf.Parser - serializer serializers.Serializer + serializer telegraf.Serializer acc telegraf.Accumulator process *process.Process } diff --git a/plugins/serializers/binary/binary.go b/plugins/serializers/binary/binary.go index 754a1b1710a3a..ce4148813b2be 100644 --- a/plugins/serializers/binary/binary.go +++ b/plugins/serializers/binary/binary.go @@ -101,7 +101,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func init() { serializers.Add("binary", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go index 5bd3c250348b4..827301fa7c8b6 100644 --- a/plugins/serializers/carbon2/carbon2.go +++ b/plugins/serializers/carbon2/carbon2.go @@ -112,16 +112,8 @@ func (s *Serializer) createObject(metric telegraf.Metric) []byte { func init() { serializers.Add("carbon2", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.Format = cfg.Carbon2Format - s.SanitizeReplaceChar = cfg.Carbon2SanitizeReplaceChar - - return nil -} 
diff --git a/plugins/serializers/cloudevents/cloudevents.go b/plugins/serializers/cloudevents/cloudevents.go index 0112d9fe4c084..a5dc2dcc54659 100644 --- a/plugins/serializers/cloudevents/cloudevents.go +++ b/plugins/serializers/cloudevents/cloudevents.go @@ -192,7 +192,7 @@ func (s *Serializer) createEvent(m telegraf.Metric) (*cloudevents.Event, error) func init() { serializers.Add("cloudevents", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) diff --git a/plugins/serializers/csv/csv.go b/plugins/serializers/csv/csv.go index d9ba94f74db9d..7931270f4b567 100644 --- a/plugins/serializers/csv/csv.go +++ b/plugins/serializers/csv/csv.go @@ -238,18 +238,8 @@ func (s *Serializer) writeDataOrdered(metric telegraf.Metric) error { func init() { serializers.Add("csv", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.TimestampFormat = cfg.TimestampFormat - s.Separator = cfg.CSVSeparator - s.Header = cfg.CSVHeader - s.Prefix = cfg.CSVPrefix - - return nil -} diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index cb84ae47ff837..60dc318278822 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -355,20 +355,8 @@ func compatibleSanitize(name, value string) string { func init() { serializers.Add("graphite", - func() serializers.Serializer { + func() telegraf.Serializer { return &GraphiteSerializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *GraphiteSerializer) InitFromConfig(cfg *serializers.Config) error { - s.Prefix = cfg.Prefix - s.Templates = cfg.Templates - s.StrictRegex = cfg.GraphiteStrictRegex - s.TagSupport = cfg.GraphiteTagSupport - s.TagSanitizeMode = 
cfg.GraphiteTagSanitizeMode - s.Separator = cfg.GraphiteSeparator - - return nil -} diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index 191fa7e516957..cfe7631a261a9 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -328,17 +328,8 @@ func appendStringField(buf []byte, value string) []byte { func init() { serializers.Add("influx", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.MaxLineBytes = cfg.InfluxMaxLineBytes - s.SortFields = cfg.InfluxSortFields - s.UintSupport = cfg.InfluxUintSupport - - return nil -} diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index e293089886c81..c91281868209a 100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -162,19 +162,8 @@ func (s *Serializer) transform(obj interface{}) (interface{}, error) { func init() { serializers.Add("json", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.TimestampUnits = config.Duration(cfg.TimestampUnits) - s.TimestampFormat = cfg.TimestampFormat - s.Transformation = cfg.Transformation - s.NestedFieldsInclude = cfg.JSONNestedFieldInclude - s.NestedFieldsExclude = cfg.JSONNestedFieldExclude - - return nil -} diff --git a/plugins/serializers/msgpack/msgpack.go b/plugins/serializers/msgpack/msgpack.go index fef37e7c056ea..a1bb7346fc82d 100644 --- a/plugins/serializers/msgpack/msgpack.go +++ b/plugins/serializers/msgpack/msgpack.go @@ -40,13 +40,8 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func init() { 
serializers.Add("msgpack", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(_ *serializers.Config) error { - return nil -} diff --git a/plugins/serializers/nowmetric/nowmetric.go b/plugins/serializers/nowmetric/nowmetric.go index fd2782959c457..d25e90276e3a1 100644 --- a/plugins/serializers/nowmetric/nowmetric.go +++ b/plugins/serializers/nowmetric/nowmetric.go @@ -130,13 +130,8 @@ func verifyValue(v interface{}) bool { func init() { serializers.Add("nowmetric", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(_ *serializers.Config) error { - return nil -} diff --git a/plugins/serializers/prometheus/prometheus.go b/plugins/serializers/prometheus/prometheus.go index e2a7b34e8543a..21dc136c70524 100644 --- a/plugins/serializers/prometheus/prometheus.go +++ b/plugins/serializers/prometheus/prometheus.go @@ -88,18 +88,8 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func init() { serializers.Add("prometheus", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.FormatConfig.CompactEncoding = cfg.PrometheusCompactEncoding - s.FormatConfig.SortMetrics = cfg.PrometheusSortMetrics - s.FormatConfig.StringAsLabel = cfg.PrometheusStringAsLabel - s.FormatConfig.ExportTimestamp = cfg.PrometheusExportTimestamp - - return nil -} diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index 3f281eaed75a5..413f876cc963e 100644 --- 
a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -350,16 +350,8 @@ func (sl sortableLabels) Swap(i, j int) { func init() { serializers.Add("prometheusremotewrite", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.SortMetrics = cfg.PrometheusSortMetrics - s.StringAsLabel = cfg.PrometheusStringAsLabel - - return nil -} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 03b9e9d06aa1b..27881d04e11aa 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -1,14 +1,9 @@ package serializers -import ( - "fmt" - "time" - - "github.com/influxdata/telegraf" -) +import "github.com/influxdata/telegraf" // Creator is the function to create a new serializer -type Creator func() Serializer +type Creator func() telegraf.Serializer // Serializers contains the registry of all known serializers (following the new style) var Serializers = make(map[string]Creator) @@ -17,157 +12,3 @@ var Serializers = make(map[string]Creator) func Add(name string, creator Creator) { Serializers[name] = creator } - -// SerializerOutput is an interface for output plugins that are able to -// serialize telegraf metrics into arbitrary data formats. -type SerializerOutput interface { - // SetSerializer sets the serializer function for the interface. - SetSerializer(serializer Serializer) -} - -// Serializer is an interface defining functions that a serializer plugin must -// satisfy. -// -// Implementations of this interface should be reentrant but are not required -// to be thread-safe. -type Serializer interface { - // Serialize takes a single telegraf metric and turns it into a byte buffer. 
- // separate metrics should be separated by a newline, and there should be - // a newline at the end of the buffer. - // - // New plugins should use SerializeBatch instead to allow for non-line - // delimited metrics. - Serialize(metric telegraf.Metric) ([]byte, error) - - // SerializeBatch takes an array of telegraf metric and serializes it into - // a byte buffer. This method is not required to be suitable for use with - // line oriented framing. - SerializeBatch(metrics []telegraf.Metric) ([]byte, error) -} - -// SerializerCompatibility is an interface for backward-compatible initialization of serializers -type SerializerCompatibility interface { - // InitFromConfig sets the serializers internal variables from the old-style config - InitFromConfig(config *Config) error -} - -// Config is a struct that covers the data types needed for all serializer types, -// and can be used to instantiate _any_ of the serializers. -type Config struct { - // DataFormat can be one of the serializer types listed in NewSerializer. - DataFormat string `toml:"data_format"` - - // Carbon2 metric format. - Carbon2Format string `toml:"carbon2_format"` - - // Character used for metric name sanitization in Carbon2. 
- Carbon2SanitizeReplaceChar string `toml:"carbon2_sanitize_replace_char"` - - // Separator for CSV - CSVSeparator string `toml:"csv_separator"` - - // Output a CSV header for naming the columns - CSVHeader bool `toml:"csv_header"` - - // Prefix the tag and field columns for CSV format - CSVPrefix bool `toml:"csv_column_prefix"` - - // Support tags in graphite protocol - GraphiteTagSupport bool `toml:"graphite_tag_support"` - - // Support tags which follow the spec - GraphiteTagSanitizeMode string `toml:"graphite_tag_sanitize_mode"` - - // Character for separating metric name and field for Graphite tags - GraphiteSeparator string `toml:"graphite_separator"` - - // Regex string - GraphiteStrictRegex string `toml:"graphite_strict_sanitize_regex"` - - // Maximum line length in bytes; influx format only - InfluxMaxLineBytes int `toml:"influx_max_line_bytes"` - - // Sort field keys, set to true only when debugging as it less performant - // than unsorted fields; influx format only - InfluxSortFields bool `toml:"influx_sort_fields"` - - // Support unsigned integer output; influx format only - InfluxUintSupport bool `toml:"influx_uint_support"` - - // Omit timestamp from output; influx format only - InfluxOmitTimestamp bool `toml:"influx_omit_timestamp"` - - // Prefix to add to all measurements, only supports Graphite - Prefix string `toml:"prefix"` - - // Template for converting telegraf metrics into Graphite - // only supports Graphite - Template string `toml:"template"` - - // Templates same Template, but multiple - Templates []string `toml:"templates"` - - // Timestamp units to use for JSON formatted output - TimestampUnits time.Duration `toml:"timestamp_units"` - - // Timestamp format to use for JSON and CSV formatted output - TimestampFormat string `toml:"timestamp_format"` - - // Transformation as JSONata expression to use for JSON formatted output - Transformation string `toml:"transformation"` - - // Field filter for interpreting data as nested JSON for JSON 
serializer - JSONNestedFieldInclude []string `toml:"json_nested_fields_include"` - JSONNestedFieldExclude []string `toml:"json_nested_fields_exclude"` - - // Include HEC routing fields for splunkmetric output - HecRouting bool `toml:"hec_routing"` - - // Enable Splunk MultiMetric output (Splunk 8.0+) - SplunkmetricMultiMetric bool `toml:"splunkmetric_multi_metric"` - - // Omit the Splunk Event "metric" tag - SplunkmetricOmitEventTag bool `toml:"splunkmetric_omit_event_tag"` - - // Point tags to use as the source name for Wavefront (if none found, host will be used). - WavefrontSourceOverride []string `toml:"wavefront_source_override"` - - // Use Strict rules to sanitize metric and tag names from invalid characters for Wavefront - // When enabled forward slash (/) and comma (,) will be accepted - WavefrontUseStrict bool `toml:"wavefront_use_strict"` - - // Convert "_" in prefixes to "." for Wavefront - WavefrontDisablePrefixConversion bool `toml:"wavefront_disable_prefix_conversion"` - - // Include the metric timestamp on each sample. - PrometheusExportTimestamp bool `toml:"prometheus_export_timestamp"` - - // Sort prometheus metric families and metric samples. Useful for - // debugging. - PrometheusSortMetrics bool `toml:"prometheus_sort_metrics"` - - // Output string fields as metric labels; when false string fields are - // discarded. - PrometheusStringAsLabel bool `toml:"prometheus_string_as_label"` - - // Encode metrics without HELP metadata. This helps reduce the payload size. - PrometheusCompactEncoding bool `toml:"prometheus_compact_encoding"` -} - -// NewSerializer a Serializer interface based on the given config. -func NewSerializer(config *Config) (Serializer, error) { - creator, found := Serializers[config.DataFormat] - if !found { - return nil, fmt.Errorf("invalid data format: %s", config.DataFormat) - } - - // Try to create new-style serializers the old way... 
- serializer := creator() - p, ok := serializer.(SerializerCompatibility) - if !ok { - return nil, fmt.Errorf("serializer for %q cannot be created the old way", config.DataFormat) - } - err := p.InitFromConfig(config) - - return serializer, err -} diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index 4e34ba573d2f6..4eb8b982ae373 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -210,17 +210,8 @@ func verifyValue(v interface{}) (value interface{}, valid bool) { func init() { serializers.Add("splunkmetric", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.HecRouting = cfg.HecRouting - s.MultiMetric = cfg.SplunkmetricMultiMetric - s.OmitEventTag = cfg.SplunkmetricOmitEventTag - - return nil -} diff --git a/plugins/serializers/template/template.go b/plugins/serializers/template/template.go index 0527fd4a9ba20..39ea176e864a7 100644 --- a/plugins/serializers/template/template.go +++ b/plugins/serializers/template/template.go @@ -95,7 +95,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func init() { serializers.Add("template", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go index f38c97589d298..e326a85d00997 100644 --- a/plugins/serializers/wavefront/wavefront.go +++ b/plugins/serializers/wavefront/wavefront.go @@ -189,18 +189,8 @@ func (b *buffer) WriteFloat64(val float64) { func init() { serializers.Add("wavefront", - func() serializers.Serializer { + func() telegraf.Serializer { return &Serializer{} }, ) } - -// InitFromConfig is a compatibility function to 
construct the parser the old way -func (s *Serializer) InitFromConfig(cfg *serializers.Config) error { - s.Prefix = cfg.Prefix - s.UseStrict = cfg.WavefrontUseStrict - s.SourceOverride = cfg.WavefrontSourceOverride - s.DisablePrefixConversions = cfg.WavefrontDisablePrefixConversion - - return nil -} From 56d9a29d48ed075925309d68b2db60dc2a81886d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 5 Dec 2024 17:35:28 +0100 Subject: [PATCH 123/170] chore: Fix linter findings for `revive:exported` in `plugins/inputs/o*` (#16224) --- plugins/inputs/opcua/opcua.go | 12 +- plugins/inputs/opcua/opcua_test.go | 106 ++++++------- plugins/inputs/opcua/read_client.go | 26 +-- .../inputs/opcua_listener/opcua_listener.go | 43 +++-- .../opcua_listener/opcua_listener_test.go | 148 +++++++++--------- .../inputs/opcua_listener/subscribe_client.go | 20 +-- plugins/inputs/openldap/openldap.go | 82 +++++----- plugins/inputs/openntpd/openntpd.go | 93 ++++++----- plugins/inputs/openntpd/openntpd_test.go | 14 +- .../opensearch_query/aggregation.bucket.go | 14 +- .../inputs/opensearch_query/aggregation.go | 16 +- .../opensearch_query/aggregation.metric.go | 4 +- .../opensearch_query/aggregation.response.go | 12 +- .../opensearch_query/opensearch_query.go | 90 +++++------ .../opensearch_query/opensearch_query_test.go | 17 +- plugins/inputs/opensearch_query/query.go | 8 +- plugins/inputs/opensmtpd/opensmtpd.go | 63 ++++---- plugins/inputs/opensmtpd/opensmtpd_test.go | 4 +- plugins/inputs/openstack/openstack.go | 33 ++-- plugins/inputs/opentelemetry/opentelemetry.go | 8 +- .../opentelemetry/opentelemetry_test.go | 2 +- .../inputs/openweathermap/openweathermap.go | 6 +- plugins/inputs/openweathermap/types.go | 10 +- 23 files changed, 407 insertions(+), 424 deletions(-) diff --git a/plugins/inputs/opcua/opcua.go b/plugins/inputs/opcua/opcua.go index dd0109adc5f8a..839b3d99cabe2 100644 --- a/plugins/inputs/opcua/opcua.go +++ b/plugins/inputs/opcua/opcua.go @@ -16,26 
+16,24 @@ import ( var sampleConfig string type OpcUA struct { - ReadClientConfig + readClientConfig Log telegraf.Logger `toml:"-"` - client *ReadClient + client *readClient } func (*OpcUA) SampleConfig() string { return sampleConfig } -// Init Initialise all required objects func (o *OpcUA) Init() (err error) { - o.client, err = o.ReadClientConfig.CreateReadClient(o.Log) + o.client, err = o.readClientConfig.createReadClient(o.Log) return err } -// Gather defines what data the plugin will gather. func (o *OpcUA) Gather(acc telegraf.Accumulator) error { // Will (re)connect if the client is disconnected - metrics, err := o.client.CurrentValues() + metrics, err := o.client.currentValues() if err != nil { return err } @@ -51,7 +49,7 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("opcua", func() telegraf.Input { return &OpcUA{ - ReadClientConfig: ReadClientConfig{ + readClientConfig: readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", diff --git a/plugins/inputs/opcua/opcua_test.go b/plugins/inputs/opcua/opcua_test.go index ee18778a9141d..2c0bc559128a7 100644 --- a/plugins/inputs/opcua/opcua_test.go +++ b/plugins/inputs/opcua/opcua_test.go @@ -19,19 +19,19 @@ import ( const servicePort = "4840" -type OPCTags struct { - Name string - Namespace string - IdentifierType string - Identifier string - Want interface{} +type opcTags struct { + name string + namespace string + identifierType string + identifier string + want interface{} } -func MapOPCTag(tags OPCTags) (out input.NodeSettings) { - out.FieldName = tags.Name - out.Namespace = tags.Namespace - out.IdentifierType = tags.IdentifierType - out.Identifier = tags.Identifier +func mapOPCTag(tags opcTags) (out input.NodeSettings) { + out.FieldName = tags.name + out.Namespace = tags.namespace + out.IdentifierType = tags.identifierType + out.Identifier = tags.identifier return out } @@ -52,13 
+52,13 @@ func TestGetDataBadNodeContainerIntegration(t *testing.T) { require.NoError(t, err, "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "1", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, } - readConfig := ReadClientConfig{ + readConfig := readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -83,14 +83,14 @@ func TestGetDataBadNodeContainerIntegration(t *testing.T) { } for _, tags := range testopctags { - g.Nodes = append(g.Nodes, MapOPCTag(tags)) + g.Nodes = append(g.Nodes, mapOPCTag(tags)) } readConfig.Groups = append(readConfig.Groups, g) logger := &testutil.CaptureLogger{} - readClient, err := readConfig.CreateReadClient(logger) + readClient, err := readConfig.createReadClient(logger) require.NoError(t, err) - err = readClient.Connect() + err = readClient.connect() require.NoError(t, err) } @@ -111,7 +111,7 @@ func TestReadClientIntegration(t *testing.T) { require.NoError(t, err, "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, @@ -120,7 +120,7 @@ func TestReadClientIntegration(t *testing.T) { {"DateTime", "1", "i", "51037", "0001-01-01T00:00:00Z"}, } - readConfig := ReadClientConfig{ + readConfig := readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -138,17 +138,17 @@ func TestReadClientIntegration(t *testing.T) { } for _, tags := range testopctags 
{ - readConfig.RootNodes = append(readConfig.RootNodes, MapOPCTag(tags)) + readConfig.RootNodes = append(readConfig.RootNodes, mapOPCTag(tags)) } - client, err := readConfig.CreateReadClient(testutil.Logger{}) + client, err := readConfig.createReadClient(testutil.Logger{}) require.NoError(t, err) - err = client.Connect() - require.NoError(t, err, "Connect") + err = client.connect() + require.NoError(t, err) for i, v := range client.LastReceivedData { - require.Equal(t, testopctags[i].Want, v.Value) + require.Equal(t, testopctags[i].want, v.Value) } } @@ -168,7 +168,7 @@ func TestReadClientIntegrationAdditionalFields(t *testing.T) { require.NoError(t, container.Start(), "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, @@ -196,17 +196,17 @@ func TestReadClientIntegrationAdditionalFields(t *testing.T) { for i, x := range testopctags { now := time.Now() tags := map[string]string{ - "id": fmt.Sprintf("ns=%s;%s=%s", x.Namespace, x.IdentifierType, x.Identifier), + "id": fmt.Sprintf("ns=%s;%s=%s", x.namespace, x.identifierType, x.identifier), } fields := map[string]interface{}{ - x.Name: x.Want, + x.name: x.want, "Quality": testopcquality[i], "DataType": testopctypes[i], } expectedopcmetrics = append(expectedopcmetrics, metric.New("testing", tags, fields, now)) } - readConfig := ReadClientConfig{ + readConfig := readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -225,13 +225,13 @@ func TestReadClientIntegrationAdditionalFields(t *testing.T) { } for _, tags := range testopctags { - readConfig.RootNodes = append(readConfig.RootNodes, MapOPCTag(tags)) + readConfig.RootNodes = 
append(readConfig.RootNodes, mapOPCTag(tags)) } - client, err := readConfig.CreateReadClient(testutil.Logger{}) + client, err := readConfig.createReadClient(testutil.Logger{}) require.NoError(t, err) - require.NoError(t, client.Connect()) + require.NoError(t, client.connect()) actualopcmetrics := make([]telegraf.Metric, 0, len(client.LastReceivedData)) for i := range client.LastReceivedData { @@ -258,13 +258,13 @@ func TestReadClientIntegrationWithPasswordAuth(t *testing.T) { require.NoError(t, err, "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, } - readConfig := ReadClientConfig{ + readConfig := readClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -284,17 +284,17 @@ func TestReadClientIntegrationWithPasswordAuth(t *testing.T) { } for _, tags := range testopctags { - readConfig.RootNodes = append(readConfig.RootNodes, MapOPCTag(tags)) + readConfig.RootNodes = append(readConfig.RootNodes, mapOPCTag(tags)) } - client, err := readConfig.CreateReadClient(testutil.Logger{}) + client, err := readConfig.createReadClient(testutil.Logger{}) require.NoError(t, err) - err = client.Connect() - require.NoError(t, err, "Connect") + err = client.connect() + require.NoError(t, err) for i, v := range client.LastReceivedData { - require.Equal(t, testopctags[i].Want, v.Value) + require.Equal(t, testopctags[i].want, v.Value) } } @@ -369,17 +369,17 @@ use_unregistered_reads = true o, ok := c.Inputs[0].Input.(*OpcUA) require.True(t, ok) - require.Equal(t, "localhost", o.ReadClientConfig.MetricName) - require.Equal(t, "opc.tcp://localhost:4840", o.ReadClientConfig.Endpoint) - require.Equal(t, 
config.Duration(10*time.Second), o.ReadClientConfig.ConnectTimeout) - require.Equal(t, config.Duration(5*time.Second), o.ReadClientConfig.RequestTimeout) - require.Equal(t, "auto", o.ReadClientConfig.SecurityPolicy) - require.Equal(t, "auto", o.ReadClientConfig.SecurityMode) - require.Equal(t, "/etc/telegraf/cert.pem", o.ReadClientConfig.Certificate) - require.Equal(t, "/etc/telegraf/key.pem", o.ReadClientConfig.PrivateKey) - require.Equal(t, "Anonymous", o.ReadClientConfig.AuthMethod) - require.True(t, o.ReadClientConfig.Username.Empty()) - require.True(t, o.ReadClientConfig.Password.Empty()) + require.Equal(t, "localhost", o.readClientConfig.MetricName) + require.Equal(t, "opc.tcp://localhost:4840", o.readClientConfig.Endpoint) + require.Equal(t, config.Duration(10*time.Second), o.readClientConfig.ConnectTimeout) + require.Equal(t, config.Duration(5*time.Second), o.readClientConfig.RequestTimeout) + require.Equal(t, "auto", o.readClientConfig.SecurityPolicy) + require.Equal(t, "auto", o.readClientConfig.SecurityMode) + require.Equal(t, "/etc/telegraf/cert.pem", o.readClientConfig.Certificate) + require.Equal(t, "/etc/telegraf/key.pem", o.readClientConfig.PrivateKey) + require.Equal(t, "Anonymous", o.readClientConfig.AuthMethod) + require.True(t, o.readClientConfig.Username.Empty()) + require.True(t, o.readClientConfig.Password.Empty()) require.Equal(t, []input.NodeSettings{ { FieldName: "name", @@ -396,7 +396,7 @@ use_unregistered_reads = true TagsSlice: [][]string{{"tag0", "val0"}, {"tag00", "val00"}}, DefaultTags: map[string]string{"tag6": "val6"}, }, - }, o.ReadClientConfig.RootNodes) + }, o.readClientConfig.RootNodes) require.Equal(t, []input.NodeGroupSettings{ { MetricName: "foo", @@ -424,10 +424,10 @@ use_unregistered_reads = true Identifier: "4001", }}, }, - }, o.ReadClientConfig.Groups) - require.Equal(t, opcua.OpcUAWorkarounds{AdditionalValidStatusCodes: []string{"0xC0"}}, o.ReadClientConfig.Workarounds) - require.Equal(t, 
ReadClientWorkarounds{UseUnregisteredReads: true}, o.ReadClientConfig.ReadClientWorkarounds) - require.Equal(t, []string{"DataType"}, o.ReadClientConfig.OptionalFields) + }, o.readClientConfig.Groups) + require.Equal(t, opcua.OpcUAWorkarounds{AdditionalValidStatusCodes: []string{"0xC0"}}, o.readClientConfig.Workarounds) + require.Equal(t, readClientWorkarounds{UseUnregisteredReads: true}, o.readClientConfig.ReadClientWorkarounds) + require.Equal(t, []string{"DataType"}, o.readClientConfig.OptionalFields) err = o.Init() require.NoError(t, err) require.Len(t, o.client.NodeMetricMapping, 5, "incorrect number of nodes") diff --git a/plugins/inputs/opcua/read_client.go b/plugins/inputs/opcua/read_client.go index 25d8a3f9576e3..f8e04e02ea568 100644 --- a/plugins/inputs/opcua/read_client.go +++ b/plugins/inputs/opcua/read_client.go @@ -15,33 +15,33 @@ import ( "github.com/influxdata/telegraf/selfstat" ) -type ReadClientWorkarounds struct { +type readClientWorkarounds struct { UseUnregisteredReads bool `toml:"use_unregistered_reads"` } -type ReadClientConfig struct { +type readClientConfig struct { ReadRetryTimeout config.Duration `toml:"read_retry_timeout"` ReadRetries uint64 `toml:"read_retry_count"` - ReadClientWorkarounds ReadClientWorkarounds `toml:"request_workarounds"` + ReadClientWorkarounds readClientWorkarounds `toml:"request_workarounds"` input.InputClientConfig } -// ReadClient Requests the current values from the required nodes when gather is called. -type ReadClient struct { +// readClient Requests the current values from the required nodes when gather is called. 
+type readClient struct { *input.OpcUAInputClient ReadRetryTimeout time.Duration ReadRetries uint64 ReadSuccess selfstat.Stat ReadError selfstat.Stat - Workarounds ReadClientWorkarounds + Workarounds readClientWorkarounds // internal values reqIDs []*ua.ReadValueID ctx context.Context } -func (rc *ReadClientConfig) CreateReadClient(log telegraf.Logger) (*ReadClient, error) { +func (rc *readClientConfig) createReadClient(log telegraf.Logger) (*readClient, error) { inputClient, err := rc.InputClientConfig.CreateInputClient(log) if err != nil { return nil, err @@ -55,7 +55,7 @@ func (rc *ReadClientConfig) CreateReadClient(log telegraf.Logger) (*ReadClient, rc.ReadRetryTimeout = config.Duration(100 * time.Millisecond) } - return &ReadClient{ + return &readClient{ OpcUAInputClient: inputClient, ReadRetryTimeout: time.Duration(rc.ReadRetryTimeout), ReadRetries: rc.ReadRetries, @@ -65,7 +65,7 @@ func (rc *ReadClientConfig) CreateReadClient(log telegraf.Logger) (*ReadClient, }, nil } -func (o *ReadClient) Connect() error { +func (o *readClient) connect() error { o.ctx = context.Background() if err := o.OpcUAClient.Connect(o.ctx); err != nil { @@ -103,14 +103,14 @@ func (o *ReadClient) Connect() error { return nil } -func (o *ReadClient) ensureConnected() error { +func (o *readClient) ensureConnected() error { if o.State() == opcua.Disconnected || o.State() == opcua.Closed { - return o.Connect() + return o.connect() } return nil } -func (o *ReadClient) CurrentValues() ([]telegraf.Metric, error) { +func (o *readClient) currentValues() ([]telegraf.Metric, error) { if err := o.ensureConnected(); err != nil { return nil, err } @@ -142,7 +142,7 @@ func (o *ReadClient) CurrentValues() ([]telegraf.Metric, error) { return metrics, nil } -func (o *ReadClient) read() error { +func (o *readClient) read() error { req := &ua.ReadRequest{ MaxAge: 2000, TimestampsToReturn: ua.TimestampsToReturnBoth, diff --git a/plugins/inputs/opcua_listener/opcua_listener.go 
b/plugins/inputs/opcua_listener/opcua_listener.go index 9399f9c971869..6085c90c9f94c 100644 --- a/plugins/inputs/opcua_listener/opcua_listener.go +++ b/plugins/inputs/opcua_listener/opcua_listener.go @@ -15,8 +15,8 @@ import ( ) type OpcUaListener struct { - SubscribeClientConfig - client *SubscribeClient + subscribeClientConfig + client *subscribeClient Log telegraf.Logger `toml:"-"` } @@ -36,20 +36,35 @@ func (o *OpcUaListener) Init() (err error) { default: return fmt.Errorf("unknown setting %q for 'connect_fail_behavior'", o.ConnectFailBehavior) } - o.client, err = o.SubscribeClientConfig.CreateSubscribeClient(o.Log) + o.client, err = o.subscribeClientConfig.createSubscribeClient(o.Log) return err } +func (o *OpcUaListener) Start(acc telegraf.Accumulator) error { + return o.connect(acc) +} + func (o *OpcUaListener) Gather(acc telegraf.Accumulator) error { - if o.client.State() == opcua.Connected || o.SubscribeClientConfig.ConnectFailBehavior == "ignore" { + if o.client.State() == opcua.Connected || o.subscribeClientConfig.ConnectFailBehavior == "ignore" { return nil } return o.connect(acc) } +func (o *OpcUaListener) Stop() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + select { + case <-o.client.stop(ctx): + o.Log.Infof("Unsubscribed OPC UA successfully") + case <-ctx.Done(): // Timeout context + o.Log.Warn("Timeout while stopping OPC UA subscription") + } + cancel() +} + func (o *OpcUaListener) connect(acc telegraf.Accumulator) error { ctx := context.Background() - ch, err := o.client.StartStreamValues(ctx) + ch, err := o.client.startStreamValues(ctx) if err != nil { return err } @@ -68,26 +83,10 @@ func (o *OpcUaListener) connect(acc telegraf.Accumulator) error { return nil } -func (o *OpcUaListener) Start(acc telegraf.Accumulator) error { - return o.connect(acc) -} - -func (o *OpcUaListener) Stop() { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - select { - case <-o.client.Stop(ctx): - 
o.Log.Infof("Unsubscribed OPC UA successfully") - case <-ctx.Done(): // Timeout context - o.Log.Warn("Timeout while stopping OPC UA subscription") - } - cancel() -} - -// Add this plugin to telegraf func init() { inputs.Add("opcua_listener", func() telegraf.Input { return &OpcUaListener{ - SubscribeClientConfig: SubscribeClientConfig{ + subscribeClientConfig: subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", diff --git a/plugins/inputs/opcua_listener/opcua_listener_test.go b/plugins/inputs/opcua_listener/opcua_listener_test.go index 86c384b76d709..5484252bbb56d 100644 --- a/plugins/inputs/opcua_listener/opcua_listener_test.go +++ b/plugins/inputs/opcua_listener/opcua_listener_test.go @@ -21,25 +21,25 @@ import ( const servicePort = "4840" -type OPCTags struct { - Name string - Namespace string - IdentifierType string - Identifier string - Want interface{} +type opcTags struct { + name string + namespace string + identifierType string + identifier string + want interface{} } -func MapOPCTag(tags OPCTags) (out input.NodeSettings) { - out.FieldName = tags.Name - out.Namespace = tags.Namespace - out.IdentifierType = tags.IdentifierType - out.Identifier = tags.Identifier +func mapOPCTag(tags opcTags) (out input.NodeSettings) { + out.FieldName = tags.name + out.Namespace = tags.namespace + out.IdentifierType = tags.identifierType + out.Identifier = tags.identifier return out } func TestInitPluginWithBadConnectFailBehaviorValue(t *testing.T) { plugin := OpcUaListener{ - SubscribeClientConfig: SubscribeClientConfig{ + subscribeClientConfig: subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://notarealserver:4840", @@ -69,7 +69,7 @@ func TestStartPlugin(t *testing.T) { acc := &testutil.Accumulator{} plugin := OpcUaListener{ - SubscribeClientConfig: SubscribeClientConfig{ + subscribeClientConfig: 
subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://notarealserver:4840", @@ -86,17 +86,17 @@ func TestStartPlugin(t *testing.T) { }, Log: testutil.Logger{}, } - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, } for _, tags := range testopctags { - plugin.SubscribeClientConfig.RootNodes = append(plugin.SubscribeClientConfig.RootNodes, MapOPCTag(tags)) + plugin.subscribeClientConfig.RootNodes = append(plugin.subscribeClientConfig.RootNodes, mapOPCTag(tags)) } require.NoError(t, plugin.Init()) err := plugin.Start(acc) require.ErrorContains(t, err, "could not resolve address") - plugin.SubscribeClientConfig.ConnectFailBehavior = "ignore" + plugin.subscribeClientConfig.ConnectFailBehavior = "ignore" require.NoError(t, plugin.Init()) require.NoError(t, plugin.Start(acc)) require.Equal(t, opcua.Disconnected, plugin.client.OpcUAClient.State()) @@ -110,7 +110,7 @@ func TestStartPlugin(t *testing.T) { wait.ForLog("TCP network layer listening on opc.tcp://"), ), } - plugin.SubscribeClientConfig.ConnectFailBehavior = "retry" + plugin.subscribeClientConfig.ConnectFailBehavior = "retry" require.NoError(t, plugin.Init()) require.NoError(t, plugin.Start(acc)) require.Equal(t, opcua.Disconnected, plugin.client.OpcUAClient.State()) @@ -144,7 +144,7 @@ func TestSubscribeClientIntegration(t *testing.T) { require.NoError(t, err, "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, @@ -154,12 +154,12 @@ func TestSubscribeClientIntegration(t *testing.T) { } tagsRemaining := make([]string, 0, len(testopctags)) for i, tag := range testopctags { - if tag.Want != nil { - tagsRemaining = append(tagsRemaining, 
testopctags[i].Name) + if tag.want != nil { + tagsRemaining = append(tagsRemaining, testopctags[i].name) } } - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -177,9 +177,9 @@ func TestSubscribeClientIntegration(t *testing.T) { SubscriptionInterval: 0, } for _, tags := range testopctags { - subscribeConfig.RootNodes = append(subscribeConfig.RootNodes, MapOPCTag(tags)) + subscribeConfig.RootNodes = append(subscribeConfig.RootNodes, mapOPCTag(tags)) } - o, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + o, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.NoError(t, err) // give initial setup a couple extra attempts, as on CircleCI this can be @@ -188,12 +188,12 @@ func TestSubscribeClientIntegration(t *testing.T) { return o.SetupOptions() == nil }, 5*time.Second, 10*time.Millisecond) - err = o.Connect() + err = o.connect() require.NoError(t, err, "Connection failed") ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - res, err := o.StartStreamValues(ctx) + res, err := o.startStreamValues(ctx) require.Equal(t, opcua.Connected, o.State()) require.NoError(t, err) @@ -202,16 +202,16 @@ func TestSubscribeClientIntegration(t *testing.T) { case m := <-res: for fieldName, fieldValue := range m.Fields() { for _, tag := range testopctags { - if fieldName != tag.Name { + if fieldName != tag.name { continue } - if tag.Want == nil { - t.Errorf("Tag: %s has value: %v", tag.Name, fieldValue) + if tag.want == nil { + t.Errorf("Tag: %s has value: %v", tag.name, fieldValue) return } - require.Equal(t, tag.Want, fieldValue) + require.Equal(t, tag.want, fieldValue) newRemaining := make([]string, 0, len(tagsRemaining)) for _, remainingTag := range tagsRemaining { @@ -257,7 +257,7 @@ func 
TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { require.NoError(t, container.Start(), "failed to start container") defer container.Terminate() - testopctags := []OPCTags{ + testopctags := []opcTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, @@ -285,10 +285,10 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { for i, x := range testopctags { now := time.Now() tags := map[string]string{ - "id": fmt.Sprintf("ns=%s;%s=%s", x.Namespace, x.IdentifierType, x.Identifier), + "id": fmt.Sprintf("ns=%s;%s=%s", x.namespace, x.identifierType, x.identifier), } fields := map[string]interface{}{ - x.Name: x.Want, + x.name: x.want, "Quality": testopcquality[i], "DataType": testopctypes[i], } @@ -297,12 +297,12 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { tagsRemaining := make([]string, 0, len(testopctags)) for i, tag := range testopctags { - if tag.Want != nil { - tagsRemaining = append(tagsRemaining, testopctags[i].Name) + if tag.want != nil { + tagsRemaining = append(tagsRemaining, testopctags[i].name) } } - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]), @@ -321,9 +321,9 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { SubscriptionInterval: 0, } for _, tags := range testopctags { - subscribeConfig.RootNodes = append(subscribeConfig.RootNodes, MapOPCTag(tags)) + subscribeConfig.RootNodes = append(subscribeConfig.RootNodes, mapOPCTag(tags)) } - o, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + o, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.NoError(t, err) // give initial setup a couple extra attempts, as on 
CircleCI this can be @@ -332,11 +332,11 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { return o.SetupOptions() == nil }, 5*time.Second, 10*time.Millisecond) - require.NoError(t, o.Connect(), "Connection failed") + require.NoError(t, o.connect(), "Connection failed") ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - res, err := o.StartStreamValues(ctx) + res, err := o.startStreamValues(ctx) require.NoError(t, err) for { @@ -344,12 +344,12 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) { case m := <-res: for fieldName, fieldValue := range m.Fields() { for _, tag := range testopctags { - if fieldName != tag.Name { + if fieldName != tag.name { continue } // nil-value tags should not be sent from server, error if one does - if tag.Want == nil { - t.Errorf("Tag: %s has value: %v", tag.Name, fieldValue) + if tag.want == nil { + t.Errorf("Tag: %s has value: %v", tag.name, fieldValue) return } @@ -434,19 +434,19 @@ additional_valid_status_codes = ["0xC0"] o, ok := c.Inputs[0].Input.(*OpcUaListener) require.True(t, ok) - require.Equal(t, "localhost", o.SubscribeClientConfig.MetricName) - require.Equal(t, "opc.tcp://localhost:4840", o.SubscribeClientConfig.Endpoint) - require.Equal(t, config.Duration(10*time.Second), o.SubscribeClientConfig.ConnectTimeout) - require.Equal(t, config.Duration(5*time.Second), o.SubscribeClientConfig.RequestTimeout) - require.Equal(t, config.Duration(200*time.Millisecond), o.SubscribeClientConfig.SubscriptionInterval) - require.Equal(t, "error", o.SubscribeClientConfig.ConnectFailBehavior) - require.Equal(t, "auto", o.SubscribeClientConfig.SecurityPolicy) - require.Equal(t, "auto", o.SubscribeClientConfig.SecurityMode) - require.Equal(t, "/etc/telegraf/cert.pem", o.SubscribeClientConfig.Certificate) - require.Equal(t, "/etc/telegraf/key.pem", o.SubscribeClientConfig.PrivateKey) - require.Equal(t, "Anonymous", o.SubscribeClientConfig.AuthMethod) - require.True(t, 
o.SubscribeClientConfig.Username.Empty()) - require.True(t, o.SubscribeClientConfig.Password.Empty()) + require.Equal(t, "localhost", o.subscribeClientConfig.MetricName) + require.Equal(t, "opc.tcp://localhost:4840", o.subscribeClientConfig.Endpoint) + require.Equal(t, config.Duration(10*time.Second), o.subscribeClientConfig.ConnectTimeout) + require.Equal(t, config.Duration(5*time.Second), o.subscribeClientConfig.RequestTimeout) + require.Equal(t, config.Duration(200*time.Millisecond), o.subscribeClientConfig.SubscriptionInterval) + require.Equal(t, "error", o.subscribeClientConfig.ConnectFailBehavior) + require.Equal(t, "auto", o.subscribeClientConfig.SecurityPolicy) + require.Equal(t, "auto", o.subscribeClientConfig.SecurityMode) + require.Equal(t, "/etc/telegraf/cert.pem", o.subscribeClientConfig.Certificate) + require.Equal(t, "/etc/telegraf/key.pem", o.subscribeClientConfig.PrivateKey) + require.Equal(t, "Anonymous", o.subscribeClientConfig.AuthMethod) + require.True(t, o.subscribeClientConfig.Username.Empty()) + require.True(t, o.subscribeClientConfig.Password.Empty()) require.Equal(t, []input.NodeSettings{ { FieldName: "name", @@ -460,7 +460,7 @@ additional_valid_status_codes = ["0xC0"] IdentifierType: "s", Identifier: "two", }, - }, o.SubscribeClientConfig.RootNodes) + }, o.subscribeClientConfig.RootNodes) require.Equal(t, []input.NodeGroupSettings{ { MetricName: "foo", @@ -484,9 +484,9 @@ additional_valid_status_codes = ["0xC0"] TagsSlice: [][]string{{"tag1", "override"}}, }}, }, - }, o.SubscribeClientConfig.Groups) - require.Equal(t, opcua.OpcUAWorkarounds{AdditionalValidStatusCodes: []string{"0xC0"}}, o.SubscribeClientConfig.Workarounds) - require.Equal(t, []string{"DataType"}, o.SubscribeClientConfig.OptionalFields) + }, o.subscribeClientConfig.Groups) + require.Equal(t, opcua.OpcUAWorkarounds{AdditionalValidStatusCodes: []string{"0xC0"}}, o.subscribeClientConfig.Workarounds) + require.Equal(t, []string{"DataType"}, 
o.subscribeClientConfig.OptionalFields) } func TestSubscribeClientConfigWithMonitoringParams(t *testing.T) { @@ -548,11 +548,11 @@ deadband_value = 100.0 }, }}, }, - }, o.SubscribeClientConfig.Groups) + }, o.subscribeClientConfig.Groups) } func TestSubscribeClientConfigInvalidTrigger(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -581,12 +581,12 @@ func TestSubscribeClientConfigInvalidTrigger(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "trigger 'not_valid' not supported, node 'ns=3;i=1'") } func TestSubscribeClientConfigMissingTrigger(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -615,12 +615,12 @@ func TestSubscribeClientConfigMissingTrigger(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "trigger '' not supported, node 'ns=3;i=1'") } func TestSubscribeClientConfigInvalidDeadbandType(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -650,12 +650,12 @@ func TestSubscribeClientConfigInvalidDeadbandType(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "deadband_type 'not_valid' not supported, node 
'ns=3;i=1'") } func TestSubscribeClientConfigMissingDeadbandType(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -684,12 +684,12 @@ func TestSubscribeClientConfigMissingDeadbandType(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "deadband_type '' not supported, node 'ns=3;i=1'") } func TestSubscribeClientConfigInvalidDeadbandValue(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -721,12 +721,12 @@ func TestSubscribeClientConfigInvalidDeadbandValue(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "negative deadband_value not supported, node 'ns=3;i=1'") } func TestSubscribeClientConfigMissingDeadbandValue(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -756,12 +756,12 @@ func TestSubscribeClientConfigMissingDeadbandValue(t *testing.T) { }, }) - _, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + _, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.ErrorContains(t, err, "deadband_value was not set, node 'ns=3;i=1'") } func TestSubscribeClientConfigValidMonitoringParams(t *testing.T) { - subscribeConfig := SubscribeClientConfig{ + subscribeConfig := subscribeClientConfig{ InputClientConfig: 
input.InputClientConfig{ OpcUAClientConfig: opcua.OpcUAClientConfig{ Endpoint: "opc.tcp://localhost:4840", @@ -799,7 +799,7 @@ func TestSubscribeClientConfigValidMonitoringParams(t *testing.T) { }, }) - subClient, err := subscribeConfig.CreateSubscribeClient(testutil.Logger{}) + subClient, err := subscribeConfig.createSubscribeClient(testutil.Logger{}) require.NoError(t, err) require.Equal(t, &ua.MonitoringParameters{ SamplingInterval: 50, diff --git a/plugins/inputs/opcua_listener/subscribe_client.go b/plugins/inputs/opcua_listener/subscribe_client.go index 320262bafbf60..1f70f006e7b6b 100644 --- a/plugins/inputs/opcua_listener/subscribe_client.go +++ b/plugins/inputs/opcua_listener/subscribe_client.go @@ -16,15 +16,15 @@ import ( "github.com/influxdata/telegraf/plugins/common/opcua/input" ) -type SubscribeClientConfig struct { +type subscribeClientConfig struct { input.InputClientConfig SubscriptionInterval config.Duration `toml:"subscription_interval"` ConnectFailBehavior string `toml:"connect_fail_behavior"` } -type SubscribeClient struct { +type subscribeClient struct { *input.OpcUAInputClient - Config SubscribeClientConfig + Config subscribeClientConfig sub *opcua.Subscription monitoredItemsReqs []*ua.MonitoredItemCreateRequest @@ -81,7 +81,7 @@ func assignConfigValuesToRequest(req *ua.MonitoredItemCreateRequest, monParams * return nil } -func (sc *SubscribeClientConfig) CreateSubscribeClient(log telegraf.Logger) (*SubscribeClient, error) { +func (sc *subscribeClientConfig) createSubscribeClient(log telegraf.Logger) (*subscribeClient, error) { client, err := sc.InputClientConfig.CreateInputClient(log) if err != nil { return nil, err @@ -92,7 +92,7 @@ func (sc *SubscribeClientConfig) CreateSubscribeClient(log telegraf.Logger) (*Su } processingCtx, processingCancel := context.WithCancel(context.Background()) - subClient := &SubscribeClient{ + subClient := &subscribeClient{ OpcUAInputClient: client, Config: *sc, monitoredItemsReqs: 
make([]*ua.MonitoredItemCreateRequest, len(client.NodeIDs)), @@ -118,7 +118,7 @@ func (sc *SubscribeClientConfig) CreateSubscribeClient(log telegraf.Logger) (*Su return subClient, nil } -func (o *SubscribeClient) Connect() error { +func (o *subscribeClient) connect() error { err := o.OpcUAClient.Connect(o.ctx) if err != nil { return err @@ -137,7 +137,7 @@ func (o *SubscribeClient) Connect() error { return nil } -func (o *SubscribeClient) Stop(ctx context.Context) <-chan struct{} { +func (o *subscribeClient) stop(ctx context.Context) <-chan struct{} { o.Log.Debugf("Stopping OPC subscription...") if o.State() != opcuaclient.Connected { return nil @@ -152,8 +152,8 @@ func (o *SubscribeClient) Stop(ctx context.Context) <-chan struct{} { return closing } -func (o *SubscribeClient) StartStreamValues(ctx context.Context) (<-chan telegraf.Metric, error) { - err := o.Connect() +func (o *subscribeClient) startStreamValues(ctx context.Context) (<-chan telegraf.Metric, error) { + err := o.connect() if err != nil { switch o.Config.ConnectFailBehavior { case "retry": @@ -191,7 +191,7 @@ func (o *SubscribeClient) StartStreamValues(ctx context.Context) (<-chan telegra return o.metrics, nil } -func (o *SubscribeClient) processReceivedNotifications() { +func (o *subscribeClient) processReceivedNotifications() { for { select { case <-o.ctx.Done(): diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index 81c63d13c4796..c89b623f9c802 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - ldap "github.com/go-ldap/ldap/v3" + "github.com/go-ldap/ldap/v3" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" @@ -17,56 +17,41 @@ import ( //go:embed sample.conf var sampleConfig string +var ( + searchBase = "cn=Monitor" + searchFilter = "(|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject))" + 
searchAttrs = []string{"monitorCounter", "monitorOpInitiated", "monitorOpCompleted", "monitoredInfo"} + attrTranslate = map[string]string{ + "monitorCounter": "", + "monitoredInfo": "", + "monitorOpInitiated": "_initiated", + "monitorOpCompleted": "_completed", + "olmMDBPagesMax": "_mdb_pages_max", + "olmMDBPagesUsed": "_mdb_pages_used", + "olmMDBPagesFree": "_mdb_pages_free", + "olmMDBReadersMax": "_mdb_readers_max", + "olmMDBReadersUsed": "_mdb_readers_used", + "olmMDBEntries": "_mdb_entries", + } +) + type Openldap struct { - Host string - Port int + Host string `toml:"host"` + Port int `toml:"port"` SSL string `toml:"ssl" deprecated:"1.7.0;1.35.0;use 'tls' instead"` TLS string `toml:"tls"` - InsecureSkipVerify bool + InsecureSkipVerify bool `toml:"insecure_skip_verify"` SSLCA string `toml:"ssl_ca" deprecated:"1.7.0;1.35.0;use 'tls_ca' instead"` TLSCA string `toml:"tls_ca"` - BindDn string - BindPassword string - ReverseMetricNames bool -} - -var searchBase = "cn=Monitor" -var searchFilter = "(|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject))" -var searchAttrs = []string{"monitorCounter", "monitorOpInitiated", "monitorOpCompleted", "monitoredInfo"} -var attrTranslate = map[string]string{ - "monitorCounter": "", - "monitoredInfo": "", - "monitorOpInitiated": "_initiated", - "monitorOpCompleted": "_completed", - "olmMDBPagesMax": "_mdb_pages_max", - "olmMDBPagesUsed": "_mdb_pages_used", - "olmMDBPagesFree": "_mdb_pages_free", - "olmMDBReadersMax": "_mdb_readers_max", - "olmMDBReadersUsed": "_mdb_readers_used", - "olmMDBEntries": "_mdb_entries", -} - -// return an initialized Openldap -func NewOpenldap() *Openldap { - return &Openldap{ - Host: "localhost", - Port: 389, - SSL: "", - TLS: "", - InsecureSkipVerify: false, - SSLCA: "", - TLSCA: "", - BindDn: "", - BindPassword: "", - ReverseMetricNames: false, - } + BindDn string `toml:"bind_dn"` + BindPassword string `toml:"bind_password"` + ReverseMetricNames bool 
`toml:"reverse_metric_names"` } func (*Openldap) SampleConfig() string { return sampleConfig } -// gather metrics func (o *Openldap) Gather(acc telegraf.Accumulator) error { if o.TLS == "" { o.TLS = o.SSL @@ -198,6 +183,21 @@ func dnToMetric(dn string, o *Openldap) string { return strings.ReplaceAll(metricName, ",", "") } +func newOpenldap() *Openldap { + return &Openldap{ + Host: "localhost", + Port: 389, + SSL: "", + TLS: "", + InsecureSkipVerify: false, + SSLCA: "", + TLSCA: "", + BindDn: "", + BindPassword: "", + ReverseMetricNames: false, + } +} + func init() { - inputs.Add("openldap", func() telegraf.Input { return NewOpenldap() }) + inputs.Add("openldap", func() telegraf.Input { return newOpenldap() }) } diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go index 9066c6f5dc268..9daf61673bbc9 100644 --- a/plugins/inputs/openntpd/openntpd.go +++ b/plugins/inputs/openntpd/openntpd.go @@ -20,60 +20,38 @@ import ( //go:embed sample.conf var sampleConfig string -// Mapping of the ntpctl tag key to the index in the command output -var tagI = map[string]int{ - "stratum": 2, -} - -// Mapping of float metrics to their index in the command output -var floatI = map[string]int{ - "offset": 5, - "delay": 6, - "jitter": 7, -} +var ( + defaultBinary = "/usr/sbin/ntpctl" + defaultTimeout = config.Duration(5 * time.Second) -// Mapping of int metrics to their index in the command output -var intI = map[string]int{ - "wt": 0, - "tl": 1, - "next": 3, - "poll": 4, -} - -type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) + // Mapping of the ntpctl tag key to the index in the command output + tagI = map[string]int{ + "stratum": 2, + } + // Mapping of float metrics to their index in the command output + floatI = map[string]int{ + "offset": 5, + "delay": 6, + "jitter": 7, + } + // Mapping of int metrics to their index in the command output + intI = map[string]int{ + "wt": 0, + "tl": 1, + "next": 3, + "poll": 
4, + } +) -// Openntpd is used to store configuration values type Openntpd struct { - Binary string - Timeout config.Duration - UseSudo bool + Binary string `toml:"binary"` + Timeout config.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` run runner } -var defaultBinary = "/usr/sbin/ntpctl" -var defaultTimeout = config.Duration(5 * time.Second) - -// Shell out to ntpctl and return the output -func openntpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { - cmdArgs := []string{"-s", "peers"} - - cmd := exec.Command(cmdName, cmdArgs...) - - if useSudo { - cmdArgs = append([]string{cmdName}, cmdArgs...) - cmd = exec.Command("sudo", cmdArgs...) - } - - var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Duration(timeout)) - if err != nil { - return &out, fmt.Errorf("error running ntpctl: %w", err) - } - - return &out, nil -} +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) func (*Openntpd) SampleConfig() string { return sampleConfig @@ -190,6 +168,27 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { return nil } +// Shell out to ntpctl and return the output +func openntpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { + cmdArgs := []string{"-s", "peers"} + + cmd := exec.Command(cmdName, cmdArgs...) + + if useSudo { + cmdArgs = append([]string{cmdName}, cmdArgs...) + cmd = exec.Command("sudo", cmdArgs...) 
+ } + + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return &out, fmt.Errorf("error running ntpctl: %w", err) + } + + return &out, nil +} + func init() { inputs.Add("openntpd", func() telegraf.Input { return &Openntpd{ diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index df3b7187b094f..0ea15d0aa5703 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { +func openntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBufferString(output), nil } @@ -19,7 +19,7 @@ func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buff func TestParseSimpleOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutput), + run: openntpdCTL(simpleOutput), } err := v.Gather(acc) @@ -50,7 +50,7 @@ func TestParseSimpleOutput(t *testing.T) { func TestParseSimpleOutputwithStatePrefix(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputwithStatePrefix), + run: openntpdCTL(simpleOutputwithStatePrefix), } err := v.Gather(acc) @@ -82,7 +82,7 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { func TestParseSimpleOutputInvalidPeer(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputInvalidPeer), + run: openntpdCTL(simpleOutputInvalidPeer), } err := v.Gather(acc) @@ -110,7 +110,7 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { func TestParseSimpleOutputServersDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServersDNSError), + run: 
openntpdCTL(simpleOutputServersDNSError), } err := v.Gather(acc) @@ -152,7 +152,7 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { func TestParseSimpleOutputServerDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServerDNSError), + run: openntpdCTL(simpleOutputServerDNSError), } err := v.Gather(acc) @@ -180,7 +180,7 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(fullOutput), + run: openntpdCTL(fullOutput), } err := v.Gather(acc) diff --git a/plugins/inputs/opensearch_query/aggregation.bucket.go b/plugins/inputs/opensearch_query/aggregation.bucket.go index 87669e5c7ad0f..fcde3cc27320f 100644 --- a/plugins/inputs/opensearch_query/aggregation.bucket.go +++ b/plugins/inputs/opensearch_query/aggregation.bucket.go @@ -5,9 +5,9 @@ import ( "fmt" ) -type BucketAggregationRequest map[string]*aggregationFunction +type bucketAggregationRequest map[string]*aggregationFunction -func (b BucketAggregationRequest) AddAggregation(name, aggType, field string) error { +func (b bucketAggregationRequest) addAggregation(name, aggType, field string) error { switch aggType { case "terms": default: @@ -22,11 +22,11 @@ func (b BucketAggregationRequest) AddAggregation(name, aggType, field string) er return nil } -func (b BucketAggregationRequest) AddNestedAggregation(name string, a AggregationRequest) { +func (b bucketAggregationRequest) addNestedAggregation(name string, a aggregationRequest) { b[name].nested = a } -func (b BucketAggregationRequest) BucketSize(name string, size int) error { +func (b bucketAggregationRequest) bucketSize(name string, size int) error { if size <= 0 { return errors.New("invalid size; must be integer value > 0") } @@ -35,11 +35,11 @@ func (b BucketAggregationRequest) BucketSize(name string, size int) error { return fmt.Errorf("aggregation %q not found", name) } - b[name].Size(size) 
+ b[name].setSize(size) return nil } -func (b BucketAggregationRequest) Missing(name, missing string) { - b[name].Missing(missing) +func (b bucketAggregationRequest) missing(name, missing string) { + b[name].setMissing(missing) } diff --git a/plugins/inputs/opensearch_query/aggregation.go b/plugins/inputs/opensearch_query/aggregation.go index 4dc8f7b070ec3..e4c8f68ad5875 100644 --- a/plugins/inputs/opensearch_query/aggregation.go +++ b/plugins/inputs/opensearch_query/aggregation.go @@ -4,14 +4,8 @@ import ( "encoding/json" ) -type AggregationRequest interface { - AddAggregation(string, string, string) error -} - -type NestedAggregation interface { - Nested(string, AggregationRequest) - Missing(string) - Size(int) +type aggregationRequest interface { + addAggregation(string, string, string) error } type aggregationFunction struct { @@ -20,7 +14,7 @@ type aggregationFunction struct { size int missing string - nested AggregationRequest + nested aggregationRequest } func (a *aggregationFunction) MarshalJSON() ([]byte, error) { @@ -45,11 +39,11 @@ func (a *aggregationFunction) MarshalJSON() ([]byte, error) { return json.Marshal(agg) } -func (a *aggregationFunction) Size(size int) { +func (a *aggregationFunction) setSize(size int) { a.size = size } -func (a *aggregationFunction) Missing(missing string) { +func (a *aggregationFunction) setMissing(missing string) { a.missing = missing } diff --git a/plugins/inputs/opensearch_query/aggregation.metric.go b/plugins/inputs/opensearch_query/aggregation.metric.go index d18296757af0e..084b9e2c3de0f 100644 --- a/plugins/inputs/opensearch_query/aggregation.metric.go +++ b/plugins/inputs/opensearch_query/aggregation.metric.go @@ -2,9 +2,9 @@ package opensearch_query import "fmt" -type MetricAggregationRequest map[string]*aggregationFunction +type metricAggregationRequest map[string]*aggregationFunction -func (m MetricAggregationRequest) AddAggregation(name, aggType, field string) error { +func (m metricAggregationRequest) 
addAggregation(name, aggType, field string) error { if t := getAggregationFunctionType(aggType); t != "metric" { return fmt.Errorf("aggregation function %q not supported", aggType) } diff --git a/plugins/inputs/opensearch_query/aggregation.response.go b/plugins/inputs/opensearch_query/aggregation.response.go index 54c5f173feb57..122a0cabb0407 100644 --- a/plugins/inputs/opensearch_query/aggregation.response.go +++ b/plugins/inputs/opensearch_query/aggregation.response.go @@ -36,7 +36,7 @@ type bucketData struct { subaggregation aggregation } -func (a *aggregationResponse) GetMetrics(acc telegraf.Accumulator, measurement string) error { +func (a *aggregationResponse) getMetrics(acc telegraf.Accumulator, measurement string) error { // Simple case (no aggregations) if a.Aggregations == nil { tags := make(map[string]string) @@ -47,20 +47,20 @@ func (a *aggregationResponse) GetMetrics(acc telegraf.Accumulator, measurement s return nil } - return a.Aggregations.GetMetrics(acc, measurement, a.Hits.TotalHits.Value, make(map[string]string)) + return a.Aggregations.getMetrics(acc, measurement, a.Hits.TotalHits.Value, make(map[string]string)) } -func (a *aggregation) GetMetrics(acc telegraf.Accumulator, measurement string, docCount int64, tags map[string]string) error { +func (a *aggregation) getMetrics(acc telegraf.Accumulator, measurement string, docCount int64, tags map[string]string) error { var err error fields := make(map[string]interface{}) for name, agg := range *a { - if agg.IsAggregation() { + if agg.isAggregation() { for _, bucket := range agg.buckets { tt := map[string]string{name: bucket.Key} for k, v := range tags { tt[k] = v } - err = bucket.subaggregation.GetMetrics(acc, measurement, bucket.DocumentCount, tt) + err = bucket.subaggregation.getMetrics(acc, measurement, bucket.DocumentCount, tt) if err != nil { return err } @@ -101,7 +101,7 @@ func (a *aggregateValue) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, &a.metrics) } -func (a 
*aggregateValue) IsAggregation() bool { +func (a *aggregateValue) isAggregation() bool { return !(a.buckets == nil) } diff --git a/plugins/inputs/opensearch_query/opensearch_query.go b/plugins/inputs/opensearch_query/opensearch_query.go index b9cefce59e025..833bbaab960c1 100644 --- a/plugins/inputs/opensearch_query/opensearch_query.go +++ b/plugins/inputs/opensearch_query/opensearch_query.go @@ -25,7 +25,6 @@ import ( //go:embed sample.conf var sampleConfig string -// OpensearchQuery struct type OpensearchQuery struct { URLs []string `toml:"urls"` Username config.Secret `toml:"username"` @@ -41,7 +40,6 @@ type OpensearchQuery struct { osClient *opensearch.Client } -// osAggregation struct type osAggregation struct { Index string `toml:"index"` MeasurementName string `toml:"measurement_name"` @@ -56,14 +54,13 @@ type osAggregation struct { MissingTagValue string `toml:"missing_tag_value"` mapMetricFields map[string]string - aggregation AggregationRequest + aggregation aggregationRequest } func (*OpensearchQuery) SampleConfig() string { return sampleConfig } -// Init the plugin. 
func (o *OpensearchQuery) Init() error { if o.URLs == nil { return errors.New("no urls defined") @@ -89,19 +86,21 @@ func (o *OpensearchQuery) Init() error { return nil } -func (o *OpensearchQuery) initAggregation(agg osAggregation, i int) (err error) { - for _, metricField := range agg.MetricFields { - if _, ok := agg.mapMetricFields[metricField]; !ok { - return fmt.Errorf("metric field %q not found on index %q", metricField, agg.Index) - } - } +func (o *OpensearchQuery) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup - err = agg.buildAggregationQuery() - if err != nil { - return fmt.Errorf("error building aggregation: %w", err) + for _, agg := range o.Aggregations { + wg.Add(1) + go func(agg osAggregation) { + defer wg.Done() + err := o.osAggregationQuery(acc, agg) + if err != nil { + acc.AddError(fmt.Errorf("opensearch query aggregation %q: %w ", agg.MeasurementName, err)) + } + }(agg) } - o.Aggregations[i] = agg + wg.Wait() return nil } @@ -136,22 +135,19 @@ func (o *OpensearchQuery) newClient() error { return err } -// Gather writes the results of the queries from OpenSearch to the Accumulator. 
-func (o *OpensearchQuery) Gather(acc telegraf.Accumulator) error { - var wg sync.WaitGroup +func (o *OpensearchQuery) initAggregation(agg osAggregation, i int) (err error) { + for _, metricField := range agg.MetricFields { + if _, ok := agg.mapMetricFields[metricField]; !ok { + return fmt.Errorf("metric field %q not found on index %q", metricField, agg.Index) + } + } - for _, agg := range o.Aggregations { - wg.Add(1) - go func(agg osAggregation) { - defer wg.Done() - err := o.osAggregationQuery(acc, agg) - if err != nil { - acc.AddError(fmt.Errorf("opensearch query aggregation %q: %w ", agg.MeasurementName, err)) - } - }(agg) + err = agg.buildAggregationQuery() + if err != nil { + return fmt.Errorf("error building aggregation: %w", err) } - wg.Wait() + o.Aggregations[i] = agg return nil } @@ -164,16 +160,7 @@ func (o *OpensearchQuery) osAggregationQuery(acc telegraf.Accumulator, aggregati return err } - return searchResult.GetMetrics(acc, aggregation.MeasurementName) -} - -func init() { - inputs.Add("opensearch_query", func() telegraf.Input { - return &OpensearchQuery{ - Timeout: config.Duration(time.Second * 5), - HealthCheckInterval: config.Duration(time.Second * 10), - } - }) + return searchResult.getMetrics(acc, aggregation.MeasurementName) } func (o *OpensearchQuery) runAggregationQuery(ctx context.Context, aggregation osAggregation) (*aggregationResponse, error) { @@ -184,13 +171,13 @@ func (o *OpensearchQuery) runAggregationQuery(ctx context.Context, aggregation o filterQuery = "*" } - aq := &Query{ + aq := &query{ Size: 0, Aggregations: aggregation.aggregation, Query: nil, } - boolQuery := &BoolQuery{ + boolQuery := &boolQuery{ FilterQueryString: filterQuery, TimestampField: aggregation.DateField, TimeRangeFrom: from, @@ -231,8 +218,8 @@ func (o *OpensearchQuery) runAggregationQuery(ctx context.Context, aggregation o } func (aggregation *osAggregation) buildAggregationQuery() error { - var agg AggregationRequest - agg = &MetricAggregationRequest{} + var 
agg aggregationRequest + agg = &metricAggregationRequest{} // create one aggregation per metric field found & function defined for numeric fields for k, v := range aggregation.mapMetricFields { @@ -242,7 +229,7 @@ func (aggregation *osAggregation) buildAggregationQuery() error { continue } - err := agg.AddAggregation(strings.ReplaceAll(k, ".", "_")+"_"+aggregation.MetricFunction, aggregation.MetricFunction, k) + err := agg.addAggregation(strings.ReplaceAll(k, ".", "_")+"_"+aggregation.MetricFunction, aggregation.MetricFunction, k) if err != nil { return err } @@ -250,21 +237,21 @@ func (aggregation *osAggregation) buildAggregationQuery() error { // create a terms aggregation per tag for _, term := range aggregation.Tags { - bucket := &BucketAggregationRequest{} + bucket := &bucketAggregationRequest{} name := strings.ReplaceAll(term, ".", "_") - err := bucket.AddAggregation(name, "terms", term) + err := bucket.addAggregation(name, "terms", term) if err != nil { return err } - err = bucket.BucketSize(name, 1000) + err = bucket.bucketSize(name, 1000) if err != nil { return err } if aggregation.IncludeMissingTag && aggregation.MissingTagValue != "" { - bucket.Missing(name, aggregation.MissingTagValue) + bucket.missing(name, aggregation.MissingTagValue) } - bucket.AddNestedAggregation(name, agg) + bucket.addNestedAggregation(name, agg) agg = bucket } @@ -273,3 +260,12 @@ func (aggregation *osAggregation) buildAggregationQuery() error { return nil } + +func init() { + inputs.Add("opensearch_query", func() telegraf.Input { + return &OpensearchQuery{ + Timeout: config.Duration(time.Second * 5), + HealthCheckInterval: config.Duration(time.Second * 10), + } + }) +} diff --git a/plugins/inputs/opensearch_query/opensearch_query_test.go b/plugins/inputs/opensearch_query/opensearch_query_test.go index cc8627a98d4a8..2a1aced1e5418 100644 --- a/plugins/inputs/opensearch_query/opensearch_query_test.go +++ b/plugins/inputs/opensearch_query/opensearch_query_test.go @@ -12,13 +12,14 
@@ import ( "time" "github.com/docker/go-connections/nat" + "github.com/opensearch-project/opensearch-go/v2/opensearchutil" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/opensearch-project/opensearch-go/v2/opensearchutil" - "github.com/stretchr/testify/require" - "github.com/testcontainers/testcontainers-go/wait" ) const ( @@ -674,18 +675,18 @@ func TestOpensearchQueryIntegration(t *testing.T) { } func TestMetricAggregationMarshal(t *testing.T) { - agg := &MetricAggregationRequest{} - err := agg.AddAggregation("sum_taxful_total_price", "sum", "taxful_total_price") + agg := &metricAggregationRequest{} + err := agg.addAggregation("sum_taxful_total_price", "sum", "taxful_total_price") require.NoError(t, err) _, err = json.Marshal(agg) require.NoError(t, err) - bucket := &BucketAggregationRequest{} - err = bucket.AddAggregation("terms_by_currency", "terms", "currency") + bucket := &bucketAggregationRequest{} + err = bucket.addAggregation("terms_by_currency", "terms", "currency") require.NoError(t, err) - bucket.AddNestedAggregation("terms_by_currency", agg) + bucket.addNestedAggregation("terms_by_currency", agg) _, err = json.Marshal(bucket) require.NoError(t, err) } diff --git a/plugins/inputs/opensearch_query/query.go b/plugins/inputs/opensearch_query/query.go index 1a12aa5e3c594..e06f518f6c22b 100644 --- a/plugins/inputs/opensearch_query/query.go +++ b/plugins/inputs/opensearch_query/query.go @@ -5,13 +5,13 @@ import ( "time" ) -type Query struct { +type query struct { Size int `json:"size"` - Aggregations AggregationRequest `json:"aggregations"` + Aggregations aggregationRequest `json:"aggregations"` Query interface{} `json:"query,omitempty"` } -type BoolQuery struct { +type boolQuery struct { FilterQueryString string TimestampField string 
TimeRangeFrom time.Time @@ -19,7 +19,7 @@ type BoolQuery struct { DateFieldFormat string } -func (b *BoolQuery) MarshalJSON() ([]byte, error) { +func (b *boolQuery) MarshalJSON() ([]byte, error) { // Construct range dateTimeRange := map[string]interface{}{ "from": b.TimeRangeFrom, diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index 89f0f822d8c42..e3511e9a1f59f 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -21,49 +21,29 @@ import ( //go:embed sample.conf var sampleConfig string -type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) +var ( + defaultBinary = "/usr/sbin/smtpctl" + defaultTimeout = config.Duration(time.Second) +) -// Opensmtpd is used to store configuration values type Opensmtpd struct { - Binary string - Timeout config.Duration - UseSudo bool + Binary string `toml:"binary"` + Timeout config.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` run runner } -var defaultBinary = "/usr/sbin/smtpctl" -var defaultTimeout = config.Duration(time.Second) - -// Shell out to opensmtpd_stat and return the output -func opensmtpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { - cmdArgs := []string{"show", "stats"} - - cmd := exec.Command(cmdName, cmdArgs...) - - if useSudo { - cmdArgs = append([]string{cmdName}, cmdArgs...) - cmd = exec.Command("sudo", cmdArgs...) - } - - var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Duration(timeout)) - if err != nil { - return &out, fmt.Errorf("error running smtpctl: %w", err) - } - - return &out, nil -} +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) -// Gather collects the configured stats from smtpctl and adds them to the -// Accumulator func (*Opensmtpd) SampleConfig() string { return sampleConfig } -// All the dots in stat name will replaced by underscores. 
Histogram statistics will not be collected. func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { + // All the dots in stat name will be replaced by underscores. + // Histogram statistics will not be collected. + // Always exclude uptime.human statistics statExcluded := []string{"uptime.human"} filterExcluded, err := filter.Compile(statExcluded) @@ -108,6 +88,27 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { return nil } +// Shell out to opensmtpd_stat and return the output +func opensmtpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { + cmdArgs := []string{"show", "stats"} + + cmd := exec.Command(cmdName, cmdArgs...) + + if useSudo { + cmdArgs = append([]string{cmdName}, cmdArgs...) + cmd = exec.Command("sudo", cmdArgs...) + } + + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return &out, fmt.Errorf("error running smtpctl: %w", err) + } + + return &out, nil +} + func init() { inputs.Add("opensmtpd", func() telegraf.Input { return &Opensmtpd{ diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index 599bf500895f9..5d856f68b761c 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { +func smtpCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBufferString(output), nil } @@ -19,7 +19,7 @@ func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Opensmtpd{ - run: SMTPCTL(fullOutput), + run: smtpCTL(fullOutput), } err := v.Gather(acc) diff --git 
a/plugins/inputs/openstack/openstack.go b/plugins/inputs/openstack/openstack.go index 4031ac2217711..881466a540e89 100644 --- a/plugins/inputs/openstack/openstack.go +++ b/plugins/inputs/openstack/openstack.go @@ -57,7 +57,6 @@ var ( typeStorage = regexp.MustCompile(`_errors$|_read$|_read_req$|_write$|_write_req$`) ) -// OpenStack is the main structure associated with a collection instance. type OpenStack struct { // Configuration variables IdentityEndpoint string `toml:"authentication_endpoint"` @@ -93,19 +92,10 @@ type OpenStack struct { services map[string]bool } -// convertTimeFormat, to convert time format based on HumanReadableTS -func (o *OpenStack) convertTimeFormat(t time.Time) interface{} { - if o.HumanReadableTS { - return t.Format("2006-01-02T15:04:05.999999999Z07:00") - } - return t.UnixNano() -} - func (*OpenStack) SampleConfig() string { return sampleConfig } -// initialize performs any necessary initialization functions func (o *OpenStack) Init() error { if len(o.EnabledServices) == 0 { o.EnabledServices = []string{"services", "projects", "hypervisors", "flavors", "networks", "volumes"} @@ -266,14 +256,6 @@ func (o *OpenStack) Start(telegraf.Accumulator) error { return nil } -func (o *OpenStack) Stop() { - if o.client != nil { - o.client.CloseIdleConnections() - } -} - -// Gather gathers resources from the OpenStack API and accumulates metrics. This -// implements the Input interface. 
func (o *OpenStack) Gather(acc telegraf.Accumulator) error { ctx := context.Background() callDuration := make(map[string]interface{}, len(o.services)) @@ -344,6 +326,12 @@ func (o *OpenStack) Gather(acc telegraf.Accumulator) error { return nil } +func (o *OpenStack) Stop() { + if o.client != nil { + o.client.CloseIdleConnections() + } +} + func (o *OpenStack) availableServicesFromAuth(provider *gophercloud.ProviderClient) (bool, error) { authResult := provider.GetAuthResult() if authResult == nil { @@ -1067,7 +1055,14 @@ func (o *OpenStack) gatherServerDiagnostics(ctx context.Context, acc telegraf.Ac return nil } -// init registers a callback which creates a new OpenStack input instance. +// convertTimeFormat, to convert time format based on HumanReadableTS +func (o *OpenStack) convertTimeFormat(t time.Time) interface{} { + if o.HumanReadableTS { + return t.Format("2006-01-02T15:04:05.999999999Z07:00") + } + return t.UnixNano() +} + func init() { inputs.Add("openstack", func() telegraf.Input { return &OpenStack{ diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go index 6b6bd5a695877..3323569935728 100644 --- a/plugins/inputs/opentelemetry/opentelemetry.go +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -46,10 +46,6 @@ func (*OpenTelemetry) SampleConfig() string { return sampleConfig } -func (*OpenTelemetry) Gather(telegraf.Accumulator) error { - return nil -} - func (o *OpenTelemetry) Init() error { if o.ServiceAddress == "" { o.ServiceAddress = "0.0.0.0:4317" @@ -123,6 +119,10 @@ func (o *OpenTelemetry) Start(acc telegraf.Accumulator) error { return nil } +func (*OpenTelemetry) Gather(telegraf.Accumulator) error { + return nil +} + func (o *OpenTelemetry) Stop() { if o.grpcServer != nil { o.grpcServer.Stop() diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index ae6b198f2da5b..751aaf1af3b12 100644 --- 
a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb-observability/otel2influx" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/sdk/metric" @@ -24,7 +25,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/protobuf/encoding/protojson" - "github.com/influxdata/influxdb-observability/otel2influx" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 6543b113abcb2..4eb354474d150 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -174,7 +174,7 @@ func (n *OpenWeatherMap) gatherWeather(acc telegraf.Accumulator, city string) er return fmt.Errorf("querying %q failed: %w", addr, err) } - var e WeatherEntry + var e weatherEntry if err := json.Unmarshal(buf, &e); err != nil { return fmt.Errorf("parsing JSON response failed: %w", err) } @@ -223,7 +223,7 @@ func (n *OpenWeatherMap) gatherWeatherBatch(acc telegraf.Accumulator, cities str return fmt.Errorf("querying %q failed: %w", addr, err) } - var status Status + var status status if err := json.Unmarshal(buf, &status); err != nil { return fmt.Errorf("parsing JSON response failed: %w", err) } @@ -274,7 +274,7 @@ func (n *OpenWeatherMap) gatherForecast(acc telegraf.Accumulator, city string) e return fmt.Errorf("querying %q failed: %w", addr, err) } - var status Status + var status status if err := json.Unmarshal(buf, &status); err != nil { return fmt.Errorf("parsing JSON response failed: %w", err) } diff --git a/plugins/inputs/openweathermap/types.go b/plugins/inputs/openweathermap/types.go index 
4920dd3f7acde..8fc170a472aa1 100644 --- a/plugins/inputs/openweathermap/types.go +++ b/plugins/inputs/openweathermap/types.go @@ -1,6 +1,6 @@ package openweathermap -type WeatherEntry struct { +type weatherEntry struct { Dt int64 `json:"dt"` Clouds struct { All int64 `json:"all"` @@ -43,21 +43,21 @@ type WeatherEntry struct { } `json:"weather"` } -func (e WeatherEntry) snow() float64 { +func (e weatherEntry) snow() float64 { if e.Snow.Snow1 > 0 { return e.Snow.Snow1 } return e.Snow.Snow3 } -func (e WeatherEntry) rain() float64 { +func (e weatherEntry) rain() float64 { if e.Rain.Rain1 > 0 { return e.Rain.Rain1 } return e.Rain.Rain3 } -type Status struct { +type status struct { City struct { Coord struct { Lat float64 `json:"lat"` @@ -67,5 +67,5 @@ type Status struct { ID int64 `json:"id"` Name string `json:"name"` } `json:"city"` - List []WeatherEntry `json:"list"` + List []weatherEntry `json:"list"` } From 2ac55fe9866f1158e3e446540f6420d2e0e114de Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:36:47 +0100 Subject: [PATCH 124/170] chore(parsers.avro): Add unit-test for enum (#16260) --- plugins/parsers/avro/parser_test.go | 12 +++--- .../bad-timestamp-format/expected.err | 0 .../bad-timestamp-format/expected.out | 0 .../bad-timestamp-format/message.avro | 0 .../bad-timestamp-format/telegraf.conf | 4 +- .../benchmark/expected.out | 0 .../benchmark/message.json | 0 .../benchmark/telegraf.conf | 2 +- .../config-both/expected.err | 0 .../config-both/expected.out | 0 .../config-both/message.avro | 0 .../config-both/telegraf.conf | 4 +- .../config-neither/expected.err | 0 .../config-neither/expected.out | 0 .../config-neither/message.avro | 0 .../config-neither/telegraf.conf | 2 +- .../parsers/avro/testcases/enum/expected.out | 1 + .../parsers/avro/testcases/enum/message.json | 7 ++++ .../parsers/avro/testcases/enum/telegraf.conf | 41 +++++++++++++++++++ .../json-array/expected.out | 0 
.../json-array/message.json | 0 .../json-array/telegraf.conf | 2 +- .../json-format/expected.out | 0 .../json-format/message.json | 0 .../json-format/telegraf.conf | 2 +- .../expected.out | 0 .../message.avro | 0 .../telegraf.conf | 4 +- .../no-timestamp-format/expected.out | 0 .../no-timestamp-format/message.avro | 0 .../no-timestamp-format/telegraf.conf | 4 +- .../supplied_timestamp/expected.out | 0 .../supplied_timestamp/message.avro | 0 .../supplied_timestamp/telegraf.conf | 4 +- .../expected.out | 0 .../message.avro | 0 .../telegraf.conf | 4 +- .../expected.out | 0 .../message.avro | 0 .../telegraf.conf | 4 +- .../union-any/expected.out | 0 .../union-any/message.json | 0 .../union-any/telegraf.conf | 2 +- .../union-array/expected.out | 0 .../union-array/message.json | 0 .../union-array/telegraf.conf | 2 +- .../union-nullable/expected.out | 0 .../union-nullable/message.json | 0 .../union-nullable/telegraf.conf | 2 +- .../union/expected.out | 0 .../union/message.json | 0 .../union/telegraf.conf | 2 +- 52 files changed, 77 insertions(+), 28 deletions(-) rename plugins/parsers/avro/{testdata => testcases}/bad-timestamp-format/expected.err (100%) rename plugins/parsers/avro/{testdata => testcases}/bad-timestamp-format/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/bad-timestamp-format/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/bad-timestamp-format/telegraf.conf (87%) rename plugins/parsers/avro/{testdata => testcases}/benchmark/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/benchmark/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/benchmark/telegraf.conf (94%) rename plugins/parsers/avro/{testdata => testcases}/config-both/expected.err (100%) rename plugins/parsers/avro/{testdata => testcases}/config-both/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/config-both/message.avro (100%) rename plugins/parsers/avro/{testdata => 
testcases}/config-both/telegraf.conf (88%) rename plugins/parsers/avro/{testdata => testcases}/config-neither/expected.err (100%) rename plugins/parsers/avro/{testdata => testcases}/config-neither/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/config-neither/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/config-neither/telegraf.conf (64%) create mode 100644 plugins/parsers/avro/testcases/enum/expected.out create mode 100644 plugins/parsers/avro/testcases/enum/message.json create mode 100644 plugins/parsers/avro/testcases/enum/telegraf.conf rename plugins/parsers/avro/{testdata => testcases}/json-array/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/json-array/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/json-array/telegraf.conf (93%) rename plugins/parsers/avro/{testdata => testcases}/json-format/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/json-format/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/json-format/telegraf.conf (95%) rename plugins/parsers/avro/{testdata => testcases}/measurement_name_from_message/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/measurement_name_from_message/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/measurement_name_from_message/telegraf.conf (86%) rename plugins/parsers/avro/{testdata => testcases}/no-timestamp-format/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/no-timestamp-format/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/no-timestamp-format/telegraf.conf (86%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp/telegraf.conf (87%) rename plugins/parsers/avro/{testdata => 
testcases}/supplied_timestamp_fields_specified/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_specified/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_specified/telegraf.conf (85%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_unspecified/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_unspecified/message.avro (100%) rename plugins/parsers/avro/{testdata => testcases}/supplied_timestamp_fields_unspecified/telegraf.conf (80%) rename plugins/parsers/avro/{testdata => testcases}/union-any/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/union-any/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/union-any/telegraf.conf (96%) rename plugins/parsers/avro/{testdata => testcases}/union-array/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/union-array/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/union-array/telegraf.conf (93%) rename plugins/parsers/avro/{testdata => testcases}/union-nullable/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/union-nullable/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/union-nullable/telegraf.conf (95%) rename plugins/parsers/avro/{testdata => testcases}/union/expected.out (100%) rename plugins/parsers/avro/{testdata => testcases}/union/message.json (100%) rename plugins/parsers/avro/{testdata => testcases}/union/telegraf.conf (96%) diff --git a/plugins/parsers/avro/parser_test.go b/plugins/parsers/avro/parser_test.go index 6ab43f2bbc21a..43c44ac8ccbf9 100644 --- a/plugins/parsers/avro/parser_test.go +++ b/plugins/parsers/avro/parser_test.go @@ -17,8 +17,8 @@ import ( ) func TestCases(t *testing.T) { - // Get all directories in testdata - folders, err := os.ReadDir("testdata") + // Get all test-case directories + 
folders, err := os.ReadDir("testcases") require.NoError(t, err) // Make sure testdata contains data require.NotEmpty(t, folders) @@ -30,7 +30,7 @@ func TestCases(t *testing.T) { for _, f := range folders { fname := f.Name() - testdataPath := filepath.Join("testdata", fname) + testdataPath := filepath.Join("testcases", fname) configFilename := filepath.Join(testdataPath, "telegraf.conf") expectedFilename := filepath.Join(testdataPath, "expected.out") expectedErrorFilename := filepath.Join(testdataPath, "expected.err") @@ -110,7 +110,7 @@ func BenchmarkParsing(b *testing.B) { } require.NoError(b, plugin.Init()) - benchmarkData, err := os.ReadFile(filepath.Join("testdata", "benchmark", "message.json")) + benchmarkData, err := os.ReadFile(filepath.Join("testcases", "benchmark", "message.json")) require.NoError(b, err) b.ResetTimer() @@ -131,7 +131,7 @@ func TestBenchmarkDataBinary(t *testing.T) { } require.NoError(t, plugin.Init()) - benchmarkDir := filepath.Join("testdata", "benchmark") + benchmarkDir := filepath.Join("testcases", "benchmark") // Read the expected valued from file parser := &influx.Parser{} @@ -167,7 +167,7 @@ func BenchmarkParsingBinary(b *testing.B) { require.NoError(b, plugin.Init()) // Re-encode the benchmark data from JSON to binary format - jsonData, err := os.ReadFile(filepath.Join("testdata", "benchmark", "message.json")) + jsonData, err := os.ReadFile(filepath.Join("testcases", "benchmark", "message.json")) require.NoError(b, err) codec, err := goavro.NewCodec(benchmarkSchema) require.NoError(b, err) diff --git a/plugins/parsers/avro/testdata/bad-timestamp-format/expected.err b/plugins/parsers/avro/testcases/bad-timestamp-format/expected.err similarity index 100% rename from plugins/parsers/avro/testdata/bad-timestamp-format/expected.err rename to plugins/parsers/avro/testcases/bad-timestamp-format/expected.err diff --git a/plugins/parsers/avro/testdata/bad-timestamp-format/expected.out 
b/plugins/parsers/avro/testcases/bad-timestamp-format/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/bad-timestamp-format/expected.out rename to plugins/parsers/avro/testcases/bad-timestamp-format/expected.out diff --git a/plugins/parsers/avro/testdata/bad-timestamp-format/message.avro b/plugins/parsers/avro/testcases/bad-timestamp-format/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/bad-timestamp-format/message.avro rename to plugins/parsers/avro/testcases/bad-timestamp-format/message.avro diff --git a/plugins/parsers/avro/testdata/bad-timestamp-format/telegraf.conf b/plugins/parsers/avro/testcases/bad-timestamp-format/telegraf.conf similarity index 87% rename from plugins/parsers/avro/testdata/bad-timestamp-format/telegraf.conf rename to plugins/parsers/avro/testcases/bad-timestamp-format/telegraf.conf index b4a89fad7095b..07297864fd38e 100644 --- a/plugins/parsers/avro/testdata/bad-timestamp-format/telegraf.conf +++ b/plugins/parsers/avro/testcases/bad-timestamp-format/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/bad-timestamp-format/message.avro"] + files = ["./testcases/bad-timestamp-format/message.avro"] data_format = "avro" avro_measurement = "measurement" @@ -26,4 +26,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/benchmark/expected.out b/plugins/parsers/avro/testcases/benchmark/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/benchmark/expected.out rename to plugins/parsers/avro/testcases/benchmark/expected.out diff --git a/plugins/parsers/avro/testdata/benchmark/message.json b/plugins/parsers/avro/testcases/benchmark/message.json similarity index 100% rename from plugins/parsers/avro/testdata/benchmark/message.json rename to plugins/parsers/avro/testcases/benchmark/message.json diff --git a/plugins/parsers/avro/testdata/benchmark/telegraf.conf b/plugins/parsers/avro/testcases/benchmark/telegraf.conf similarity index 94% 
rename from plugins/parsers/avro/testdata/benchmark/telegraf.conf rename to plugins/parsers/avro/testcases/benchmark/telegraf.conf index c20f7ccc753c9..bc67e9a315dde 100644 --- a/plugins/parsers/avro/testdata/benchmark/telegraf.conf +++ b/plugins/parsers/avro/testcases/benchmark/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/benchmark/message.json"] + files = ["./testcases/benchmark/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/config-both/expected.err b/plugins/parsers/avro/testcases/config-both/expected.err similarity index 100% rename from plugins/parsers/avro/testdata/config-both/expected.err rename to plugins/parsers/avro/testcases/config-both/expected.err diff --git a/plugins/parsers/avro/testdata/config-both/expected.out b/plugins/parsers/avro/testcases/config-both/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/config-both/expected.out rename to plugins/parsers/avro/testcases/config-both/expected.out diff --git a/plugins/parsers/avro/testdata/config-both/message.avro b/plugins/parsers/avro/testcases/config-both/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/config-both/message.avro rename to plugins/parsers/avro/testcases/config-both/message.avro diff --git a/plugins/parsers/avro/testdata/config-both/telegraf.conf b/plugins/parsers/avro/testcases/config-both/telegraf.conf similarity index 88% rename from plugins/parsers/avro/testdata/config-both/telegraf.conf rename to plugins/parsers/avro/testcases/config-both/telegraf.conf index 61cba90f22369..fb6bb5eb2dbb1 100644 --- a/plugins/parsers/avro/testdata/config-both/telegraf.conf +++ b/plugins/parsers/avro/testcases/config-both/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/config-both/message.avro"] + files = ["./testcases/config-both/message.avro"] data_format = "avro" avro_measurement = "measurement" @@ -25,4 +25,4 @@ } ] } -''' +''' diff --git 
a/plugins/parsers/avro/testdata/config-neither/expected.err b/plugins/parsers/avro/testcases/config-neither/expected.err similarity index 100% rename from plugins/parsers/avro/testdata/config-neither/expected.err rename to plugins/parsers/avro/testcases/config-neither/expected.err diff --git a/plugins/parsers/avro/testdata/config-neither/expected.out b/plugins/parsers/avro/testcases/config-neither/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/config-neither/expected.out rename to plugins/parsers/avro/testcases/config-neither/expected.out diff --git a/plugins/parsers/avro/testdata/config-neither/message.avro b/plugins/parsers/avro/testcases/config-neither/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/config-neither/message.avro rename to plugins/parsers/avro/testcases/config-neither/message.avro diff --git a/plugins/parsers/avro/testdata/config-neither/telegraf.conf b/plugins/parsers/avro/testcases/config-neither/telegraf.conf similarity index 64% rename from plugins/parsers/avro/testdata/config-neither/telegraf.conf rename to plugins/parsers/avro/testcases/config-neither/telegraf.conf index e52128df66d46..14adc8197ac2a 100644 --- a/plugins/parsers/avro/testdata/config-neither/telegraf.conf +++ b/plugins/parsers/avro/testcases/config-neither/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/config-neither/message.avro"] + files = ["./testcases/config-neither/message.avro"] data_format = "avro" avro_measurement = "measurement" avro_tags = [ "tag" ] diff --git a/plugins/parsers/avro/testcases/enum/expected.out b/plugins/parsers/avro/testcases/enum/expected.out new file mode 100644 index 0000000000000..15565a38cfdcf --- /dev/null +++ b/plugins/parsers/avro/testcases/enum/expected.out @@ -0,0 +1 @@ +sensors,name=temperature value_int=42i,status="OK" diff --git a/plugins/parsers/avro/testcases/enum/message.json b/plugins/parsers/avro/testcases/enum/message.json new file mode 100644 index 
0000000000000..6d4f89a71540d --- /dev/null +++ b/plugins/parsers/avro/testcases/enum/message.json @@ -0,0 +1,7 @@ +{ + "name": "temperature", + "value": { + "int": 42 + }, + "status": "OK" +} \ No newline at end of file diff --git a/plugins/parsers/avro/testcases/enum/telegraf.conf b/plugins/parsers/avro/testcases/enum/telegraf.conf new file mode 100644 index 0000000000000..bcaa95c383d67 --- /dev/null +++ b/plugins/parsers/avro/testcases/enum/telegraf.conf @@ -0,0 +1,41 @@ +[[ inputs.file ]] + files = ["./testcases/enum/message.json"] + data_format = "avro" + + avro_format = "json" + avro_measurement = "sensors" + avro_tags = ["name"] + avro_fields = ["value", "status"] + avro_field_separator = "_" + avro_schema = ''' + { + "type": "record", + "name": "Metric", + "fields": [ + { + "name": "name", + "type": "string" + }, + { + "name": "value", + "type": [ + "null", + "int", + "string" + ] + }, + { + "name": "status", + "type": { + "type": "enum", + "name": "Status", + "symbols": [ + "UNKNOWN", + "OK", + "FAILURE" + ] + } + } + ] + } + ''' diff --git a/plugins/parsers/avro/testdata/json-array/expected.out b/plugins/parsers/avro/testcases/json-array/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/json-array/expected.out rename to plugins/parsers/avro/testcases/json-array/expected.out diff --git a/plugins/parsers/avro/testdata/json-array/message.json b/plugins/parsers/avro/testcases/json-array/message.json similarity index 100% rename from plugins/parsers/avro/testdata/json-array/message.json rename to plugins/parsers/avro/testcases/json-array/message.json diff --git a/plugins/parsers/avro/testdata/json-array/telegraf.conf b/plugins/parsers/avro/testcases/json-array/telegraf.conf similarity index 93% rename from plugins/parsers/avro/testdata/json-array/telegraf.conf rename to plugins/parsers/avro/testcases/json-array/telegraf.conf index 1133f3849f343..a7031ef8c1616 100644 --- a/plugins/parsers/avro/testdata/json-array/telegraf.conf +++ 
b/plugins/parsers/avro/testcases/json-array/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/json-array/message.json"] + files = ["./testcases/json-array/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/json-format/expected.out b/plugins/parsers/avro/testcases/json-format/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/json-format/expected.out rename to plugins/parsers/avro/testcases/json-format/expected.out diff --git a/plugins/parsers/avro/testdata/json-format/message.json b/plugins/parsers/avro/testcases/json-format/message.json similarity index 100% rename from plugins/parsers/avro/testdata/json-format/message.json rename to plugins/parsers/avro/testcases/json-format/message.json diff --git a/plugins/parsers/avro/testdata/json-format/telegraf.conf b/plugins/parsers/avro/testcases/json-format/telegraf.conf similarity index 95% rename from plugins/parsers/avro/testdata/json-format/telegraf.conf rename to plugins/parsers/avro/testcases/json-format/telegraf.conf index e2238f673f020..0eea8f8fc66df 100644 --- a/plugins/parsers/avro/testdata/json-format/telegraf.conf +++ b/plugins/parsers/avro/testcases/json-format/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/json-format/message.json"] + files = ["./testcases/json-format/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/measurement_name_from_message/expected.out b/plugins/parsers/avro/testcases/measurement_name_from_message/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/measurement_name_from_message/expected.out rename to plugins/parsers/avro/testcases/measurement_name_from_message/expected.out diff --git a/plugins/parsers/avro/testdata/measurement_name_from_message/message.avro b/plugins/parsers/avro/testcases/measurement_name_from_message/message.avro similarity index 100% rename from 
plugins/parsers/avro/testdata/measurement_name_from_message/message.avro rename to plugins/parsers/avro/testcases/measurement_name_from_message/message.avro diff --git a/plugins/parsers/avro/testdata/measurement_name_from_message/telegraf.conf b/plugins/parsers/avro/testcases/measurement_name_from_message/telegraf.conf similarity index 86% rename from plugins/parsers/avro/testdata/measurement_name_from_message/telegraf.conf rename to plugins/parsers/avro/testcases/measurement_name_from_message/telegraf.conf index 4b7083ff8281d..97ece814c6ea3 100644 --- a/plugins/parsers/avro/testdata/measurement_name_from_message/telegraf.conf +++ b/plugins/parsers/avro/testcases/measurement_name_from_message/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/measurement_name_from_message/message.avro"] + files = ["./testcases/measurement_name_from_message/message.avro"] data_format = "avro" avro_measurement_field = "Measurement" avro_tags = [ "Server" ] @@ -27,4 +27,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/no-timestamp-format/expected.out b/plugins/parsers/avro/testcases/no-timestamp-format/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/no-timestamp-format/expected.out rename to plugins/parsers/avro/testcases/no-timestamp-format/expected.out diff --git a/plugins/parsers/avro/testdata/no-timestamp-format/message.avro b/plugins/parsers/avro/testcases/no-timestamp-format/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/no-timestamp-format/message.avro rename to plugins/parsers/avro/testcases/no-timestamp-format/message.avro diff --git a/plugins/parsers/avro/testdata/no-timestamp-format/telegraf.conf b/plugins/parsers/avro/testcases/no-timestamp-format/telegraf.conf similarity index 86% rename from plugins/parsers/avro/testdata/no-timestamp-format/telegraf.conf rename to plugins/parsers/avro/testcases/no-timestamp-format/telegraf.conf index a5d21090fa78d..c2f5d685857f3 100644 --- 
a/plugins/parsers/avro/testdata/no-timestamp-format/telegraf.conf +++ b/plugins/parsers/avro/testcases/no-timestamp-format/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/no-timestamp-format/message.avro"] + files = ["./testcases/no-timestamp-format/message.avro"] data_format = "avro" avro_measurement = "measurement" @@ -25,4 +25,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/supplied_timestamp/expected.out b/plugins/parsers/avro/testcases/supplied_timestamp/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp/expected.out rename to plugins/parsers/avro/testcases/supplied_timestamp/expected.out diff --git a/plugins/parsers/avro/testdata/supplied_timestamp/message.avro b/plugins/parsers/avro/testcases/supplied_timestamp/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp/message.avro rename to plugins/parsers/avro/testcases/supplied_timestamp/message.avro diff --git a/plugins/parsers/avro/testdata/supplied_timestamp/telegraf.conf b/plugins/parsers/avro/testcases/supplied_timestamp/telegraf.conf similarity index 87% rename from plugins/parsers/avro/testdata/supplied_timestamp/telegraf.conf rename to plugins/parsers/avro/testcases/supplied_timestamp/telegraf.conf index ee711eae3cd0e..f3eef5f38a7e1 100644 --- a/plugins/parsers/avro/testdata/supplied_timestamp/telegraf.conf +++ b/plugins/parsers/avro/testcases/supplied_timestamp/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/supplied_timestamp/message.avro"] + files = ["./testcases/supplied_timestamp/message.avro"] data_format = "avro" avro_measurement = "measurement" avro_tags = [ "tag" ] @@ -25,4 +25,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/expected.out b/plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/expected.out similarity index 100% rename from 
plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/expected.out rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/expected.out diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/message.avro b/plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/message.avro similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/message.avro rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/message.avro diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/telegraf.conf b/plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/telegraf.conf similarity index 85% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/telegraf.conf rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/telegraf.conf index 9ae72b5308cb2..79f29b5a4b802 100644 --- a/plugins/parsers/avro/testdata/supplied_timestamp_fields_specified/telegraf.conf +++ b/plugins/parsers/avro/testcases/supplied_timestamp_fields_specified/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/supplied_timestamp_fields_specified/message.avro"] + files = ["./testcases/supplied_timestamp_fields_specified/message.avro"] data_format = "avro" avro_measurement = "measurement" avro_tags = [ "tag" ] @@ -26,4 +26,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/expected.out b/plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/expected.out rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/expected.out diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/message.avro b/plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/message.avro 
similarity index 100% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/message.avro rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/message.avro diff --git a/plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/telegraf.conf b/plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/telegraf.conf similarity index 80% rename from plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/telegraf.conf rename to plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/telegraf.conf index d5c58355ebd92..925573f4f4fbe 100644 --- a/plugins/parsers/avro/testdata/supplied_timestamp_fields_unspecified/telegraf.conf +++ b/plugins/parsers/avro/testcases/supplied_timestamp_fields_unspecified/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/supplied_timestamp_fields_unspecified/message.avro"] + files = ["./testcases/supplied_timestamp_fields_unspecified/message.avro"] data_format = "avro" avro_measurement = "measurement" avro_tags = [ "tag" ] @@ -20,4 +20,4 @@ } ] } -''' +''' diff --git a/plugins/parsers/avro/testdata/union-any/expected.out b/plugins/parsers/avro/testcases/union-any/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/union-any/expected.out rename to plugins/parsers/avro/testcases/union-any/expected.out diff --git a/plugins/parsers/avro/testdata/union-any/message.json b/plugins/parsers/avro/testcases/union-any/message.json similarity index 100% rename from plugins/parsers/avro/testdata/union-any/message.json rename to plugins/parsers/avro/testcases/union-any/message.json diff --git a/plugins/parsers/avro/testdata/union-any/telegraf.conf b/plugins/parsers/avro/testcases/union-any/telegraf.conf similarity index 96% rename from plugins/parsers/avro/testdata/union-any/telegraf.conf rename to plugins/parsers/avro/testcases/union-any/telegraf.conf index e4966aee82c6b..1cea915bafe78 100644 --- 
a/plugins/parsers/avro/testdata/union-any/telegraf.conf +++ b/plugins/parsers/avro/testcases/union-any/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/union-any/message.json"] + files = ["./testcases/union-any/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/union-array/expected.out b/plugins/parsers/avro/testcases/union-array/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/union-array/expected.out rename to plugins/parsers/avro/testcases/union-array/expected.out diff --git a/plugins/parsers/avro/testdata/union-array/message.json b/plugins/parsers/avro/testcases/union-array/message.json similarity index 100% rename from plugins/parsers/avro/testdata/union-array/message.json rename to plugins/parsers/avro/testcases/union-array/message.json diff --git a/plugins/parsers/avro/testdata/union-array/telegraf.conf b/plugins/parsers/avro/testcases/union-array/telegraf.conf similarity index 93% rename from plugins/parsers/avro/testdata/union-array/telegraf.conf rename to plugins/parsers/avro/testcases/union-array/telegraf.conf index 75ef5cb40de20..f0aa5546f735c 100644 --- a/plugins/parsers/avro/testdata/union-array/telegraf.conf +++ b/plugins/parsers/avro/testcases/union-array/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/union-array/message.json"] + files = ["./testcases/union-array/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/union-nullable/expected.out b/plugins/parsers/avro/testcases/union-nullable/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/union-nullable/expected.out rename to plugins/parsers/avro/testcases/union-nullable/expected.out diff --git a/plugins/parsers/avro/testdata/union-nullable/message.json b/plugins/parsers/avro/testcases/union-nullable/message.json similarity index 100% rename from plugins/parsers/avro/testdata/union-nullable/message.json 
rename to plugins/parsers/avro/testcases/union-nullable/message.json diff --git a/plugins/parsers/avro/testdata/union-nullable/telegraf.conf b/plugins/parsers/avro/testcases/union-nullable/telegraf.conf similarity index 95% rename from plugins/parsers/avro/testdata/union-nullable/telegraf.conf rename to plugins/parsers/avro/testcases/union-nullable/telegraf.conf index a03a7e5dc8c59..790e8d676e5b4 100644 --- a/plugins/parsers/avro/testdata/union-nullable/telegraf.conf +++ b/plugins/parsers/avro/testcases/union-nullable/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/union-nullable/message.json"] + files = ["./testcases/union-nullable/message.json"] data_format = "avro" avro_format = "json" diff --git a/plugins/parsers/avro/testdata/union/expected.out b/plugins/parsers/avro/testcases/union/expected.out similarity index 100% rename from plugins/parsers/avro/testdata/union/expected.out rename to plugins/parsers/avro/testcases/union/expected.out diff --git a/plugins/parsers/avro/testdata/union/message.json b/plugins/parsers/avro/testcases/union/message.json similarity index 100% rename from plugins/parsers/avro/testdata/union/message.json rename to plugins/parsers/avro/testcases/union/message.json diff --git a/plugins/parsers/avro/testdata/union/telegraf.conf b/plugins/parsers/avro/testcases/union/telegraf.conf similarity index 96% rename from plugins/parsers/avro/testdata/union/telegraf.conf rename to plugins/parsers/avro/testcases/union/telegraf.conf index dad3fb0a2045c..9783ec4f6371e 100644 --- a/plugins/parsers/avro/testdata/union/telegraf.conf +++ b/plugins/parsers/avro/testcases/union/telegraf.conf @@ -1,5 +1,5 @@ [[ inputs.file ]] - files = ["./testdata/union/message.json"] + files = ["./testcases/union/message.json"] data_format = "avro" avro_format = "json" From 302fd28119fa4ca8888ad5a651716e23ba66ca16 Mon Sep 17 00:00:00 2001 From: Alex Gokhale Date: Thu, 5 Dec 2024 16:38:32 +0000 Subject: [PATCH 125/170] feat(logging): Allow overriding 
message key for structured logging (#16242) --- cmd/telegraf/agent.conf | 4 ++++ cmd/telegraf/telegraf.go | 19 ++++++++-------- config/config.go | 4 ++++ docs/CONFIGURATION.md | 4 ++++ logger/logger.go | 2 ++ logger/structured_logger.go | 36 +++++++++++++++++++++--------- logger/structured_logger_test.go | 38 ++++++++++++++++++++++++++++++++ 7 files changed, 87 insertions(+), 20 deletions(-) diff --git a/cmd/telegraf/agent.conf b/cmd/telegraf/agent.conf index 12fd81ac4008c..dc2961c94bac0 100644 --- a/cmd/telegraf/agent.conf +++ b/cmd/telegraf/agent.conf @@ -57,6 +57,10 @@ ## "structured" or, on Windows, "eventlog". # logformat = "text" + ## Message key for structured logs, to override the default of "msg". + ## Ignored if `logformat` is not "structured". + # structured_log_message_key = "message" + ## Name of the file to be logged to or stderr if unset or empty. This ## setting is ignored for the "eventlog" format. # logfile = "" diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index de886c48eed7d..4fad778933f45 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -364,15 +364,16 @@ func (t *Telegraf) runAgent(ctx context.Context, reloadConfig bool) error { // Setup logging as configured. 
logConfig := &logger.Config{ - Debug: c.Agent.Debug || t.debug, - Quiet: c.Agent.Quiet || t.quiet, - LogTarget: c.Agent.LogTarget, - LogFormat: c.Agent.LogFormat, - Logfile: c.Agent.Logfile, - RotationInterval: time.Duration(c.Agent.LogfileRotationInterval), - RotationMaxSize: int64(c.Agent.LogfileRotationMaxSize), - RotationMaxArchives: c.Agent.LogfileRotationMaxArchives, - LogWithTimezone: c.Agent.LogWithTimezone, + Debug: c.Agent.Debug || t.debug, + Quiet: c.Agent.Quiet || t.quiet, + LogTarget: c.Agent.LogTarget, + LogFormat: c.Agent.LogFormat, + Logfile: c.Agent.Logfile, + StructuredLogMessageKey: c.Agent.StructuredLogMessageKey, + RotationInterval: time.Duration(c.Agent.LogfileRotationInterval), + RotationMaxSize: int64(c.Agent.LogfileRotationMaxSize), + RotationMaxArchives: c.Agent.LogfileRotationMaxArchives, + LogWithTimezone: c.Agent.LogWithTimezone, } if err := logger.SetupLogging(logConfig); err != nil { diff --git a/config/config.go b/config/config.go index 3ae2025313b4c..6a71646b095da 100644 --- a/config/config.go +++ b/config/config.go @@ -236,6 +236,10 @@ type AgentConfig struct { // Name of the file to be logged to or stderr if empty. Ignored for "eventlog" format. Logfile string `toml:"logfile"` + // Message key for structured logs, to override the default of "msg". + // Ignored if "logformat" is not "structured". + StructuredLogMessageKey string `toml:"structured_log_message_key"` + // The file will be rotated after the time interval specified. When set // to 0 no time based rotation is performed. LogfileRotationInterval Duration `toml:"logfile_rotation_interval"` diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index bab126fa06f9c..f104c06049e32 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -307,6 +307,10 @@ The agent table configures Telegraf and the defaults used across all plugins. "structured" or, on Windows, "eventlog". The output file (if any) is determined by the `logfile` setting. 
+- **structured_log_message_key**: + Message key for structured logs, to override the default of "msg". + Ignored if `logformat` is not "structured". + - **logfile**: Name of the file to be logged to or stderr if unset or empty. This setting is ignored for the "eventlog" format. diff --git a/logger/logger.go b/logger/logger.go index c344838c0b667..ab5a2ff2fb4c9 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -195,6 +195,8 @@ type Config struct { LogWithTimezone string // Logger instance name InstanceName string + // Structured logging message key + StructuredLogMessageKey string // internal log-level logLevel telegraf.LogLevel diff --git a/logger/structured_logger.go b/logger/structured_logger.go index dfe7dc2756f5e..5a4ed86a40e93 100644 --- a/logger/structured_logger.go +++ b/logger/structured_logger.go @@ -41,17 +41,19 @@ func (l *structuredLogger) Print(level telegraf.LogLevel, ts time.Time, _ string } } -var defaultStructuredHandlerOptions = &slog.HandlerOptions{ - Level: slog.Level(-99), - ReplaceAttr: func(_ []string, attr slog.Attr) slog.Attr { - // Translate the Telegraf log-levels to strings - if attr.Key == slog.LevelKey { - if level, ok := attr.Value.Any().(slog.Level); ok { - attr.Value = slog.StringValue(telegraf.LogLevel(level).String()) - } +var defaultReplaceAttr = func(_ []string, attr slog.Attr) slog.Attr { + // Translate the Telegraf log-levels to strings + if attr.Key == slog.LevelKey { + if level, ok := attr.Value.Any().(slog.Level); ok { + attr.Value = slog.StringValue(telegraf.LogLevel(level).String()) } - return attr - }, + } + return attr +} + +var defaultStructuredHandlerOptions = &slog.HandlerOptions{ + Level: slog.Level(-99), + ReplaceAttr: defaultReplaceAttr, } func init() { @@ -70,8 +72,20 @@ func init() { writer = w } + structuredHandlerOptions := defaultStructuredHandlerOptions + + if cfg.StructuredLogMessageKey != "" { + structuredHandlerOptions.ReplaceAttr = func(groups []string, attr slog.Attr) slog.Attr { + if attr.Key == 
slog.MessageKey { + attr.Key = cfg.StructuredLogMessageKey + } + + return defaultReplaceAttr(groups, attr) + } + } + return &structuredLogger{ - handler: slog.NewJSONHandler(writer, defaultStructuredHandlerOptions), + handler: slog.NewJSONHandler(writer, structuredHandlerOptions), output: writer, errlog: log.New(os.Stderr, "", 0), }, nil diff --git a/logger/structured_logger_test.go b/logger/structured_logger_test.go index 89208563a225b..1721bc48f5bcf 100644 --- a/logger/structured_logger_test.go +++ b/logger/structured_logger_test.go @@ -307,6 +307,44 @@ func TestStructuredWriteToFileInRotation(t *testing.T) { require.Len(t, files, 2) } +func TestStructuredLogMessageKey(t *testing.T) { + instance = defaultHandler() + + tmpfile, err := os.CreateTemp("", "") + require.NoError(t, err) + defer os.Remove(tmpfile.Name()) + + cfg := &Config{ + Logfile: tmpfile.Name(), + LogFormat: "structured", + RotationMaxArchives: -1, + Debug: true, + StructuredLogMessageKey: "message", + } + require.NoError(t, SetupLogging(cfg)) + + l := New("testing", "test", "") + l.Info("TEST") + + buf, err := os.ReadFile(tmpfile.Name()) + require.NoError(t, err) + + expected := map[string]interface{}{ + "level": "INFO", + "message": "TEST", + "category": "testing", + "plugin": "test", + } + + var actual map[string]interface{} + require.NoError(t, json.Unmarshal(buf, &actual)) + + require.Contains(t, actual, "time") + require.NotEmpty(t, actual["time"]) + delete(actual, "time") + require.Equal(t, expected, actual) +} + func BenchmarkTelegrafStructuredLogWrite(b *testing.B) { // Discard all logging output l := &structuredLogger{ From f360536b27dd501d732d844a95ad7eac0ee03c5d Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 6 Dec 2024 11:23:35 +0100 Subject: [PATCH 126/170] feat(outputs): Add rate-limiting infrastructure (#16258) --- internal/errors.go | 6 +- models/running_output_test.go | 227 ++++++++++- plugins/common/ratelimiter/config.go | 19 + 
plugins/common/ratelimiter/limiters.go | 66 ++++ plugins/common/ratelimiter/limiters_test.go | 176 +++++++++ plugins/common/ratelimiter/serializers.go | 100 +++++ .../common/ratelimiter/serializers_test.go | 351 ++++++++++++++++++ 7 files changed, 924 insertions(+), 21 deletions(-) create mode 100644 plugins/common/ratelimiter/config.go create mode 100644 plugins/common/ratelimiter/limiters.go create mode 100644 plugins/common/ratelimiter/limiters_test.go create mode 100644 plugins/common/ratelimiter/serializers.go create mode 100644 plugins/common/ratelimiter/serializers_test.go diff --git a/internal/errors.go b/internal/errors.go index d1e098ea441ce..a36bda794932c 100644 --- a/internal/errors.go +++ b/internal/errors.go @@ -2,7 +2,11 @@ package internal import "errors" -var ErrNotConnected = errors.New("not connected") +var ( + ErrNotConnected = errors.New("not connected") + ErrSerialization = errors.New("serialization of metric(s) failed") + ErrSizeLimitReached = errors.New("size limit reached") +) // StartupError indicates an error that occurred during startup of a plugin // e.g. due to connectivity issues or resources being not yet available. 
diff --git a/models/running_output_test.go b/models/running_output_test.go index 3c8b9e5951e1a..9a60481d52fa6 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -245,7 +245,7 @@ func TestRunningOutputWriteFail(t *testing.T) { Filter: Filter{}, } - m := &mockOutput{failWrite: true} + m := &mockOutput{batchAcceptSize: -1} ro := NewRunningOutput(m, conf, 4, 12) // Fill buffer to limit twice @@ -264,7 +264,7 @@ func TestRunningOutputWriteFail(t *testing.T) { // no successful flush yet require.Empty(t, m.Metrics()) - m.failWrite = false + m.batchAcceptSize = 0 err = ro.Write() require.NoError(t, err) @@ -277,7 +277,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { Filter: Filter{}, } - m := &mockOutput{failWrite: true} + m := &mockOutput{batchAcceptSize: -1} ro := NewRunningOutput(m, conf, 100, 1000) // add 5 metrics @@ -293,7 +293,8 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { // no successful flush yet require.Empty(t, m.Metrics()) - m.failWrite = false + m.batchAcceptSize = 0 + // add 5 more metrics for _, metric := range next5 { ro.AddMetric(metric) @@ -314,7 +315,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { Filter: Filter{}, } - m := &mockOutput{failWrite: true} + m := &mockOutput{batchAcceptSize: -1} ro := NewRunningOutput(m, conf, 5, 100) // add 5 metrics @@ -357,7 +358,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { // no successful flush yet require.Empty(t, m.Metrics()) - m.failWrite = false + m.batchAcceptSize = 0 err = ro.Write() require.NoError(t, err) @@ -377,7 +378,7 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { Filter: Filter{}, } - m := &mockOutput{failWrite: true} + m := &mockOutput{batchAcceptSize: -1} ro := NewRunningOutput(m, conf, 5, 1000) // add 5 metrics @@ -399,7 +400,8 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { require.Error(t, err) // unset fail and write metrics - m.failWrite = false + m.batchAcceptSize = 0 + err = ro.Write() require.NoError(t, 
err) @@ -620,7 +622,7 @@ func TestRunningOutputNonRetryableStartupBehaviorDefault(t *testing.T) { } } -func TestRunningOutputUntypedtartupBehaviorIgnore(t *testing.T) { +func TestRunningOutputUntypedStartupBehaviorIgnore(t *testing.T) { serr := errors.New("untyped err") for _, behavior := range []string{"", "error", "retry", "ignore"} { @@ -692,12 +694,181 @@ func TestRunningOutputPartiallyStarted(t *testing.T) { require.Equal(t, 3, mo.writes) } +func TestRunningOutputWritePartialSuccess(t *testing.T) { + plugin := &mockOutput{ + batchAcceptSize: 4, + } + model := NewRunningOutput(plugin, &OutputConfig{}, 5, 10) + require.NoError(t, model.Init()) + require.NoError(t, model.Connect()) + defer model.Close() + + // Fill buffer completely + for _, metric := range first5 { + model.AddMetric(metric) + } + for _, metric := range next5 { + model.AddMetric(metric) + } + + // We do not expect any successful flush yet + require.Empty(t, plugin.Metrics()) + require.Equal(t, 10, model.buffer.Len()) + + // Write to the output. 
This should only partially succeed with the first + // few metrics removed from buffer + require.ErrorIs(t, model.Write(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 4) + require.Equal(t, 6, model.buffer.Len()) + + // The next write should remove the next metrics from the buffer + require.ErrorIs(t, model.Write(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 8) + require.Equal(t, 2, model.buffer.Len()) + + // The last write should succeed straight away and all metrics should have + // been received by the output + require.NoError(t, model.Write()) + testutil.RequireMetricsEqual(t, append(first5, next5...), plugin.metrics) + require.Zero(t, model.buffer.Len()) +} + +func TestRunningOutputWritePartialSuccessAndLoss(t *testing.T) { + lost := 0 + plugin := &mockOutput{ + batchAcceptSize: 4, + metricFatalIndex: &lost, + } + model := NewRunningOutput(plugin, &OutputConfig{}, 5, 10) + require.NoError(t, model.Init()) + require.NoError(t, model.Connect()) + defer model.Close() + + // Fill buffer completely + for _, metric := range first5 { + model.AddMetric(metric) + } + for _, metric := range next5 { + model.AddMetric(metric) + } + expected := []telegraf.Metric{ + /* fatal, */ first5[1], first5[2], first5[3], + /* fatal, */ next5[0], next5[1], next5[2], + next5[3], next5[4], + } + + // We do not expect any successful flush yet + require.Empty(t, plugin.Metrics()) + require.Equal(t, 10, model.buffer.Len()) + + // Write to the output. 
This should only partially succeed with the first + // few metrics removed from buffer + require.ErrorIs(t, model.Write(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 3) + require.Equal(t, 6, model.buffer.Len()) + + // The next write should remove the next metrics from the buffer + require.ErrorIs(t, model.Write(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 6) + require.Equal(t, 2, model.buffer.Len()) + + // The last write should succeed straight away and all metrics should have + // been received by the output + require.NoError(t, model.Write()) + testutil.RequireMetricsEqual(t, expected, plugin.metrics) + require.Zero(t, model.buffer.Len()) +} + +func TestRunningOutputWriteBatchPartialSuccess(t *testing.T) { + plugin := &mockOutput{ + batchAcceptSize: 4, + } + model := NewRunningOutput(plugin, &OutputConfig{}, 5, 10) + require.NoError(t, model.Init()) + require.NoError(t, model.Connect()) + defer model.Close() + + // Fill buffer completely + for _, metric := range first5 { + model.AddMetric(metric) + } + for _, metric := range next5 { + model.AddMetric(metric) + } + + // We do not expect any successful flush yet + require.Empty(t, plugin.Metrics()) + require.Equal(t, 10, model.buffer.Len()) + + // Write to the output. 
This should only partially succeed with the first + // few metrics removed from buffer + require.ErrorIs(t, model.WriteBatch(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 4) + require.Equal(t, 6, model.buffer.Len()) + + // The next write should remove the next metrics from the buffer + require.ErrorIs(t, model.WriteBatch(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 8) + require.Equal(t, 2, model.buffer.Len()) + + // The last write should succeed straight away and all metrics should have + // been received by the output + require.NoError(t, model.WriteBatch()) + testutil.RequireMetricsEqual(t, append(first5, next5...), plugin.metrics) + require.Zero(t, model.buffer.Len()) +} + +func TestRunningOutputWriteBatchPartialSuccessAndLoss(t *testing.T) { + lost := 0 + plugin := &mockOutput{ + batchAcceptSize: 4, + metricFatalIndex: &lost, + } + model := NewRunningOutput(plugin, &OutputConfig{}, 5, 10) + require.NoError(t, model.Init()) + require.NoError(t, model.Connect()) + defer model.Close() + + // Fill buffer completely + for _, metric := range first5 { + model.AddMetric(metric) + } + for _, metric := range next5 { + model.AddMetric(metric) + } + expected := []telegraf.Metric{ + /* fatal, */ first5[1], first5[2], first5[3], + /* fatal, */ next5[0], next5[1], next5[2], + next5[3], next5[4], + } + + // We do not expect any successful flush yet + require.Empty(t, plugin.Metrics()) + require.Equal(t, 10, model.buffer.Len()) + + // Write to the output. 
This should only partially succeed with the first + // few metrics removed from buffer + require.ErrorIs(t, model.WriteBatch(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 3) + require.Equal(t, 6, model.buffer.Len()) + + // The next write should remove the next metrics from the buffer + require.ErrorIs(t, model.WriteBatch(), internal.ErrSizeLimitReached) + require.Len(t, plugin.metrics, 6) + require.Equal(t, 2, model.buffer.Len()) + + // The last write should succeed straight away and all metrics should have + // been received by the output + require.NoError(t, model.WriteBatch()) + testutil.RequireMetricsEqual(t, expected, plugin.metrics) + require.Zero(t, model.buffer.Len()) +} + // Benchmark adding metrics. func BenchmarkRunningOutputAddWrite(b *testing.B) { conf := &OutputConfig{ Filter: Filter{}, } - m := &perfOutput{} ro := NewRunningOutput(m, conf, 1000, 10000) @@ -712,7 +883,6 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) { conf := &OutputConfig{ Filter: Filter{}, } - m := &perfOutput{} ro := NewRunningOutput(m, conf, 1000, 10000) @@ -729,10 +899,8 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) { conf := &OutputConfig{ Filter: Filter{}, } - m := &perfOutput{failWrite: true} ro := NewRunningOutput(m, conf, 1000, 10000) - for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) } @@ -743,9 +911,11 @@ type mockOutput struct { metrics []telegraf.Metric - // if true, mock write failure - failWrite bool + // Failing output simulation + batchAcceptSize int + metricFatalIndex *int + // Startup error simulation startupError error startupErrorCount int writes int @@ -761,11 +931,11 @@ func (m *mockOutput) Connect() error { return m.startupError } -func (m *mockOutput) Close() error { +func (*mockOutput) Close() error { return nil } -func (m *mockOutput) SampleConfig() string { +func (*mockOutput) SampleConfig() string { return "" } @@ -774,12 +944,29 @@ func (m *mockOutput) Write(metrics 
[]telegraf.Metric) error { m.Lock() defer m.Unlock() - if m.failWrite { + + // Simulate a failed write + if m.batchAcceptSize < 0 { return errors.New("failed write") } - m.metrics = append(m.metrics, metrics...) - return nil + // Simulate a successful write + if m.batchAcceptSize == 0 || len(metrics) <= m.batchAcceptSize { + m.metrics = append(m.metrics, metrics...) + return nil + } + + // Simulate a partially successful write + werr := &internal.PartialWriteError{Err: internal.ErrSizeLimitReached} + for i, x := range metrics { + if m.metricFatalIndex != nil && i == *m.metricFatalIndex { + werr.MetricsReject = append(werr.MetricsReject, i) + } else if i < m.batchAcceptSize { + m.metrics = append(m.metrics, x) + werr.MetricsAccept = append(werr.MetricsAccept, i) + } + } + return werr } func (m *mockOutput) Metrics() []telegraf.Metric { diff --git a/plugins/common/ratelimiter/config.go b/plugins/common/ratelimiter/config.go new file mode 100644 index 0000000000000..a2ca077c05f59 --- /dev/null +++ b/plugins/common/ratelimiter/config.go @@ -0,0 +1,19 @@ +package ratelimiter + +import ( + "time" + + "github.com/influxdata/telegraf/config" +) + +type RateLimitConfig struct { + Limit config.Size `toml:"rate_limit"` + Period config.Duration `toml:"rate_limit_period"` +} + +func (cfg *RateLimitConfig) CreateRateLimiter() *RateLimiter { + return &RateLimiter{ + limit: int64(cfg.Limit), + period: time.Duration(cfg.Period), + } +} diff --git a/plugins/common/ratelimiter/limiters.go b/plugins/common/ratelimiter/limiters.go new file mode 100644 index 0000000000000..f24d08b6239f1 --- /dev/null +++ b/plugins/common/ratelimiter/limiters.go @@ -0,0 +1,66 @@ +package ratelimiter + +import ( + "errors" + "math" + "time" +) + +var ( + ErrLimitExceeded = errors.New("not enough tokens") +) + +type RateLimiter struct { + limit int64 + period time.Duration + periodStart time.Time + remaining int64 +} + +func (r *RateLimiter) Remaining(t time.Time) int64 { + if r.limit == 0 { + return 
math.MaxInt64 + } + + // Check for corner case + if !r.periodStart.Before(t) { + return 0 + } + + // We are in a new period, so the complete size is available + deltat := t.Sub(r.periodStart) + if deltat >= r.period { + return r.limit + } + + return r.remaining +} + +func (r *RateLimiter) Accept(t time.Time, used int64) { + if r.limit == 0 || r.periodStart.After(t) { + return + } + + // Remember the first query and reset if we are in a new period + if r.periodStart.IsZero() { + r.periodStart = t + r.remaining = r.limit + } else if deltat := t.Sub(r.periodStart); deltat >= r.period { + r.periodStart = r.periodStart.Add(deltat.Truncate(r.period)) + r.remaining = r.limit + } + + // Update the state + r.remaining = max(r.remaining-used, 0) +} + +func (r *RateLimiter) Undo(t time.Time, used int64) { + // Do nothing if we are not in the current period or unlimited because we + // already reset the limit on a new window. + if r.limit == 0 || r.periodStart.IsZero() || r.periodStart.After(t) || t.Sub(r.periodStart) >= r.period { + return + } + + // Undo the state update + r.remaining = min(r.remaining+used, r.limit) +} diff --git a/plugins/common/ratelimiter/limiters_test.go b/plugins/common/ratelimiter/limiters_test.go new file mode 100644 index 0000000000000..e886b1cc80221 --- /dev/null +++ b/plugins/common/ratelimiter/limiters_test.go @@ -0,0 +1,176 @@ +package ratelimiter + +import ( + "math" + "testing" + "time" + + "github.com/influxdata/telegraf/config" + "github.com/stretchr/testify/require" +) + +func TestUnlimited(t *testing.T) { + cfg := &RateLimitConfig{} + limiter := cfg.CreateRateLimiter() + + start := time.Now() + end := start.Add(30 * time.Minute) + for ts := start; ts.Before(end); ts = ts.Add(1 * time.Minute) { + require.EqualValues(t, int64(math.MaxInt64), limiter.Remaining(ts)) + } +} + +func TestUnlimitedWithPeriod(t *testing.T) { + cfg := &RateLimitConfig{ + Period: config.Duration(5 * time.Minute), + } + limiter := cfg.CreateRateLimiter() + + start := 
time.Now() + end := start.Add(30 * time.Minute) + for ts := start; ts.Before(end); ts = ts.Add(1 * time.Minute) { + require.EqualValues(t, int64(math.MaxInt64), limiter.Remaining(ts)) + } +} + +func TestLimited(t *testing.T) { + tests := []struct { + name string + cfg *RateLimitConfig + step time.Duration + request []int64 + expected []int64 + }{ + { + name: "constant usage", + cfg: &RateLimitConfig{ + Limit: config.Size(1024), + Period: config.Duration(5 * time.Minute), + }, + step: time.Minute, + request: []int64{300}, + expected: []int64{1024, 724, 424, 124, 0, 1024, 724, 424, 124, 0}, + }, + { + name: "variable usage", + cfg: &RateLimitConfig{ + Limit: config.Size(1024), + Period: config.Duration(5 * time.Minute), + }, + step: time.Minute, + request: []int64{256, 128, 512, 64, 64, 1024, 0, 0, 0, 0, 128, 4096, 4096, 4096, 4096, 4096}, + expected: []int64{1024, 768, 640, 128, 64, 1024, 0, 0, 0, 0, 1024, 896, 0, 0, 0, 1024}, + }, + } + + // Run the test with an offset of period multiples + for _, tt := range tests { + t.Run(tt.name+" at period", func(t *testing.T) { + // Setup the limiter + limiter := tt.cfg.CreateRateLimiter() + + // Compute the actual values + start := time.Now().Truncate(tt.step) + for i, expected := range tt.expected { + ts := start.Add(time.Duration(i) * tt.step) + remaining := limiter.Remaining(ts) + use := min(remaining, tt.request[i%len(tt.request)]) + require.Equalf(t, expected, remaining, "mismatch at index %d", i) + limiter.Accept(ts, use) + } + }) + } + + // Run the test at a time of period multiples + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup the limiter + limiter := tt.cfg.CreateRateLimiter() + + // Compute the actual values + start := time.Now().Truncate(tt.step).Add(1 * time.Second) + for i, expected := range tt.expected { + ts := start.Add(time.Duration(i) * tt.step) + remaining := limiter.Remaining(ts) + use := min(remaining, tt.request[i%len(tt.request)]) + require.Equalf(t, expected, remaining, 
"mismatch at index %d", i) + limiter.Accept(ts, use) + } + }) + } +} + +func TestUndo(t *testing.T) { + tests := []struct { + name string + cfg *RateLimitConfig + step time.Duration + request []int64 + expected []int64 + }{ + { + name: "constant usage", + cfg: &RateLimitConfig{ + Limit: config.Size(1024), + Period: config.Duration(5 * time.Minute), + }, + step: time.Minute, + request: []int64{300}, + expected: []int64{1024, 724, 424, 124, 124, 1024, 724, 424, 124, 124}, + }, + { + name: "variable usage", + cfg: &RateLimitConfig{ + Limit: config.Size(1024), + Period: config.Duration(5 * time.Minute), + }, + step: time.Minute, + request: []int64{256, 128, 512, 64, 64, 1024, 0, 0, 0, 0, 128, 4096, 4096, 4096, 4096, 4096}, + expected: []int64{1024, 768, 640, 128, 64, 1024, 0, 0, 0, 0, 1024, 896, 896, 896, 896, 1024}, + }, + } + + // Run the test with an offset of period multiples + for _, tt := range tests { + t.Run(tt.name+" at period", func(t *testing.T) { + // Setup the limiter + limiter := tt.cfg.CreateRateLimiter() + + // Compute the actual values + start := time.Now().Truncate(tt.step) + for i, expected := range tt.expected { + ts := start.Add(time.Duration(i) * tt.step) + remaining := limiter.Remaining(ts) + use := min(remaining, tt.request[i%len(tt.request)]) + require.Equalf(t, expected, remaining, "mismatch at index %d", i) + limiter.Accept(ts, use) + // Undo too large operations + if tt.request[i%len(tt.request)] > remaining { + limiter.Undo(ts, use) + } + } + }) + } + + // Run the test at a time of period multiples + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup the limiter + limiter := tt.cfg.CreateRateLimiter() + + // Compute the actual values + start := time.Now().Truncate(tt.step).Add(1 * time.Second) + for i, expected := range tt.expected { + ts := start.Add(time.Duration(i) * tt.step) + remaining := limiter.Remaining(ts) + use := min(remaining, tt.request[i%len(tt.request)]) + require.Equalf(t, expected, remaining, 
"mismatch at index %d", i) + limiter.Accept(ts, use) + // Undo too large operations + if tt.request[i%len(tt.request)] > remaining { + limiter.Undo(ts, use) + } + } + }) + } +} diff --git a/plugins/common/ratelimiter/serializers.go b/plugins/common/ratelimiter/serializers.go new file mode 100644 index 0000000000000..6bd6ce78e0ff9 --- /dev/null +++ b/plugins/common/ratelimiter/serializers.go @@ -0,0 +1,100 @@ +package ratelimiter + +import ( + "bytes" + "math" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" +) + +// Serializer interface abstracting the different implementations of a +// limited-size serializer +type Serializer interface { + Serialize(metric telegraf.Metric, limit int64) ([]byte, error) + SerializeBatch(metrics []telegraf.Metric, limit int64) ([]byte, error) +} + +// Individual serializers do serialize each metric individually using the +// serializer's Serialize() function and add the resulting output to the buffer +// until the limit is reached. This only works for serializers NOT requiring +// the serialization of a batch as-a-whole. +type IndividualSerializer struct { + serializer telegraf.Serializer + buffer *bytes.Buffer +} + +func NewIndividualSerializer(s telegraf.Serializer) *IndividualSerializer { + return &IndividualSerializer{ + serializer: s, + buffer: &bytes.Buffer{}, + } +} + +func (s *IndividualSerializer) Serialize(metric telegraf.Metric, limit int64) ([]byte, error) { + // Do the serialization + buf, err := s.serializer.Serialize(metric) + if err != nil { + return nil, err + } + + // The serialized metric fits into the limit, so output it + if buflen := int64(len(buf)); buflen <= limit { + return buf, nil + } + + // The serialized metric exceeds the limit + return nil, internal.ErrSizeLimitReached +} + +func (s *IndividualSerializer) SerializeBatch(metrics []telegraf.Metric, limit int64) ([]byte, error) { + // Grow the buffer so it can hold at least the required size. 
This will + // save us from reallocating often + s.buffer.Reset() + if limit > 0 && limit < int64(math.MaxInt) { + s.buffer.Grow(int(limit)) + } + + // Prepare a potential write error and be optimistic + werr := &internal.PartialWriteError{ + MetricsAccept: make([]int, 0, len(metrics)), + } + + // Iterate through the metrics, serialize them and add them to the output + // buffer if they are within the size limit. + var used int64 + for i, m := range metrics { + buf, err := s.serializer.Serialize(m) + if err != nil { + // Failing serialization is a fatal error so mark the metric as such + werr.Err = internal.ErrSerialization + werr.MetricsReject = append(werr.MetricsReject, i) + werr.MetricsRejectErrors = append(werr.MetricsRejectErrors, err) + continue + } + + // The serialized metric fits into the limit, so add it to the output + if usedAdded := used + int64(len(buf)); usedAdded <= limit { + if _, err := s.buffer.Write(buf); err != nil { + return nil, err + } + werr.MetricsAccept = append(werr.MetricsAccept, i) + used = usedAdded + continue + } + + // Return only the size-limit-reached error if all metrics failed. 
+ if used == 0 { + return nil, internal.ErrSizeLimitReached + } + + // Adding the serialized metric would exceed the limit so exit with an + // WriteError and fill in the required information + werr.Err = internal.ErrSizeLimitReached + break + } + if werr.Err != nil { + return s.buffer.Bytes(), werr + } + return s.buffer.Bytes(), nil +} diff --git a/plugins/common/ratelimiter/serializers_test.go b/plugins/common/ratelimiter/serializers_test.go new file mode 100644 index 0000000000000..06cc88a395674 --- /dev/null +++ b/plugins/common/ratelimiter/serializers_test.go @@ -0,0 +1,351 @@ +package ratelimiter + +import ( + "math" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers/influx" + "github.com/stretchr/testify/require" +) + +func TestIndividualSerializer(t *testing.T) { + input := []telegraf.Metric{ + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 123, + "temperature": 25.0, + "pressure": 1023.4, + }, + time.Unix(1722443551, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "B", + "status": "failed", + }, + map[string]interface{}{ + "operating_hours": 8430, + "temperature": 65.2, + "pressure": 985.9, + }, + time.Unix(1722443554, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "C", + "status": "warning", + }, + map[string]interface{}{ + "operating_hours": 6765, + "temperature": 42.5, + "pressure": 986.1, + }, + time.Unix(1722443555, 0), + ), + metric.New( + "device", + map[string]string{ + "source": "localhost", + "location": "factory_north", + }, + map[string]interface{}{ + "status": "ok", + }, + 
time.Unix(1722443556, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 5544, + "temperature": 18.6, + "pressure": 1069.4, + }, + time.Unix(1722443552, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "B", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 65, + "temperature": 29.7, + "pressure": 1101.2, + }, + time.Unix(1722443553, 0), + ), + metric.New( + "device", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + }, + map[string]interface{}{ + "status": "ok", + }, + time.Unix(1722443559, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "C", + "status": "off", + }, + map[string]interface{}{ + "operating_hours": 0, + "temperature": 0.0, + "pressure": 0.0, + }, + time.Unix(1722443562, 0), + ), + } + //nolint:lll // Resulting metrics should not be wrapped for readability + expected := []string{ + "serializer_test,location=factory_north,machine=A,source=localhost,status=ok operating_hours=123i,pressure=1023.4,temperature=25 1722443551000000000\n" + + "serializer_test,location=factory_north,machine=B,source=localhost,status=failed operating_hours=8430i,pressure=985.9,temperature=65.2 1722443554000000000\n", + "serializer_test,location=factory_north,machine=C,source=localhost,status=warning operating_hours=6765i,pressure=986.1,temperature=42.5 1722443555000000000\n" + + "device,location=factory_north,source=localhost status=\"ok\" 1722443556000000000\n" + + "serializer_test,location=factory_south,machine=A,source=gateway_af43e,status=ok operating_hours=5544i,pressure=1069.4,temperature=18.6 1722443552000000000\n", + 
"serializer_test,location=factory_south,machine=B,source=gateway_af43e,status=ok operating_hours=65i,pressure=1101.2,temperature=29.7 1722443553000000000\n" + + "device,location=factory_south,source=gateway_af43e status=\"ok\" 1722443559000000000\n" + + "serializer_test,location=factory_south,machine=C,source=gateway_af43e,status=off operating_hours=0i,pressure=0,temperature=0 1722443562000000000\n", + } + + // Setup the limited serializer + s := &influx.Serializer{SortFields: true} + require.NoError(t, s.Init()) + serializer := NewIndividualSerializer(s) + + var werr *internal.PartialWriteError + + // Do the first serialization runs with all metrics + buf, err := serializer.SerializeBatch(input, 400) + require.ErrorAs(t, err, &werr) + require.ErrorIs(t, werr.Err, internal.ErrSizeLimitReached) + require.EqualValues(t, []int{0, 1}, werr.MetricsAccept) + require.Empty(t, werr.MetricsReject) + require.Equal(t, expected[0], string(buf)) + + // Run again with the successful metrics removed + buf, err = serializer.SerializeBatch(input[2:], 400) + require.ErrorAs(t, err, &werr) + require.ErrorIs(t, werr.Err, internal.ErrSizeLimitReached) + require.EqualValues(t, []int{0, 1, 2}, werr.MetricsAccept) + require.Empty(t, werr.MetricsReject) + require.Equal(t, expected[1], string(buf)) + + // Final run with the successful metrics removed + buf, err = serializer.SerializeBatch(input[5:], 400) + require.NoError(t, err) + require.Equal(t, expected[2], string(buf)) +} + +func TestIndividualSerializerFirstTooBig(t *testing.T) { + input := []telegraf.Metric{ + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 123, + "temperature": 25.0, + "pressure": 1023.4, + }, + time.Unix(1722443551, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "B", + "status": 
"failed", + }, + map[string]interface{}{ + "operating_hours": 8430, + "temperature": 65.2, + "pressure": 985.9, + }, + time.Unix(1722443554, 0), + ), + } + + // Setup the limited serializer + s := &influx.Serializer{SortFields: true} + require.NoError(t, s.Init()) + serializer := NewIndividualSerializer(s) + + // The first metric will already exceed the size so all metrics fail and + // we expect a shortcut error. + buf, err := serializer.SerializeBatch(input, 100) + require.ErrorIs(t, err, internal.ErrSizeLimitReached) + require.Empty(t, buf) +} + +func TestIndividualSerializerUnlimited(t *testing.T) { + input := []telegraf.Metric{ + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 123, + "temperature": 25.0, + "pressure": 1023.4, + }, + time.Unix(1722443551, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "B", + "status": "failed", + }, + map[string]interface{}{ + "operating_hours": 8430, + "temperature": 65.2, + "pressure": 985.9, + }, + time.Unix(1722443554, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "localhost", + "location": "factory_north", + "machine": "C", + "status": "warning", + }, + map[string]interface{}{ + "operating_hours": 6765, + "temperature": 42.5, + "pressure": 986.1, + }, + time.Unix(1722443555, 0), + ), + metric.New( + "device", + map[string]string{ + "source": "localhost", + "location": "factory_north", + }, + map[string]interface{}{ + "status": "ok", + }, + time.Unix(1722443556, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "A", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 5544, + "temperature": 18.6, + "pressure": 1069.4, + }, + time.Unix(1722443552, 0), + ), + 
metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "B", + "status": "ok", + }, + map[string]interface{}{ + "operating_hours": 65, + "temperature": 29.7, + "pressure": 1101.2, + }, + time.Unix(1722443553, 0), + ), + metric.New( + "device", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + }, + map[string]interface{}{ + "status": "ok", + }, + time.Unix(1722443559, 0), + ), + metric.New( + "serializer_test", + map[string]string{ + "source": "gateway_af43e", + "location": "factory_south", + "machine": "C", + "status": "off", + }, + map[string]interface{}{ + "operating_hours": 0, + "temperature": 0.0, + "pressure": 0.0, + }, + time.Unix(1722443562, 0), + ), + } + //nolint:lll // Resulting metrics should not be wrapped for readability + expected := "serializer_test,location=factory_north,machine=A,source=localhost,status=ok operating_hours=123i,pressure=1023.4,temperature=25 1722443551000000000\n" + + "serializer_test,location=factory_north,machine=B,source=localhost,status=failed operating_hours=8430i,pressure=985.9,temperature=65.2 1722443554000000000\n" + + "serializer_test,location=factory_north,machine=C,source=localhost,status=warning operating_hours=6765i,pressure=986.1,temperature=42.5 1722443555000000000\n" + + "device,location=factory_north,source=localhost status=\"ok\" 1722443556000000000\n" + + "serializer_test,location=factory_south,machine=A,source=gateway_af43e,status=ok operating_hours=5544i,pressure=1069.4,temperature=18.6 1722443552000000000\n" + + "serializer_test,location=factory_south,machine=B,source=gateway_af43e,status=ok operating_hours=65i,pressure=1101.2,temperature=29.7 1722443553000000000\n" + + "device,location=factory_south,source=gateway_af43e status=\"ok\" 1722443559000000000\n" + + "serializer_test,location=factory_south,machine=C,source=gateway_af43e,status=off operating_hours=0i,pressure=0,temperature=0 1722443562000000000\n" + 
+ // Setup the limited serializer + s := &influx.Serializer{SortFields: true} + require.NoError(t, s.Init()) + serializer := NewIndividualSerializer(s) + + // Do the first serialization runs with all metrics + buf, err := serializer.SerializeBatch(input, math.MaxInt64) + require.NoError(t, err) + require.Equal(t, expected, string(buf)) +} From 2c2ff82eb050e90ed7d426276bc9cafc7ca121d4 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:50:21 +0100 Subject: [PATCH 127/170] feat(outputs.influxdb_v2): Add rate limit implementation (#15742) --- plugins/common/ratelimiter/config.go | 8 +- plugins/common/ratelimiter/limiters_test.go | 24 +++- plugins/outputs/influxdb_v2/README.md | 6 + plugins/outputs/influxdb_v2/http.go | 121 ++++++++++++------ plugins/outputs/influxdb_v2/influxdb_v2.go | 22 +++- .../outputs/influxdb_v2/influxdb_v2_test.go | 113 ++++++++++++++++ plugins/outputs/influxdb_v2/sample.conf | 6 + 7 files changed, 245 insertions(+), 55 deletions(-) diff --git a/plugins/common/ratelimiter/config.go b/plugins/common/ratelimiter/config.go index a2ca077c05f59..9ebbeb2704c16 100644 --- a/plugins/common/ratelimiter/config.go +++ b/plugins/common/ratelimiter/config.go @@ -1,6 +1,7 @@ package ratelimiter import ( + "errors" "time" "github.com/influxdata/telegraf/config" @@ -11,9 +12,12 @@ type RateLimitConfig struct { Period config.Duration `toml:"rate_limit_period"` } -func (cfg *RateLimitConfig) CreateRateLimiter() *RateLimiter { +func (cfg *RateLimitConfig) CreateRateLimiter() (*RateLimiter, error) { + if cfg.Limit > 0 && cfg.Period <= 0 { + return nil, errors.New("invalid period for rate-limit") + } return &RateLimiter{ limit: int64(cfg.Limit), period: time.Duration(cfg.Period), - } + }, nil } diff --git a/plugins/common/ratelimiter/limiters_test.go b/plugins/common/ratelimiter/limiters_test.go index e886b1cc80221..28b53159ce448 100644 --- a/plugins/common/ratelimiter/limiters_test.go +++ 
b/plugins/common/ratelimiter/limiters_test.go @@ -9,9 +9,16 @@ import ( "github.com/stretchr/testify/require" ) +func TestInvalidPeriod(t *testing.T) { + cfg := &RateLimitConfig{Limit: config.Size(1024)} + _, err := cfg.CreateRateLimiter() + require.ErrorContains(t, err, "invalid period for rate-limit") +} + func TestUnlimited(t *testing.T) { cfg := &RateLimitConfig{} - limiter := cfg.CreateRateLimiter() + limiter, err := cfg.CreateRateLimiter() + require.NoError(t, err) start := time.Now() end := start.Add(30 * time.Minute) @@ -24,7 +31,8 @@ func TestUnlimitedWithPeriod(t *testing.T) { cfg := &RateLimitConfig{ Period: config.Duration(5 * time.Minute), } - limiter := cfg.CreateRateLimiter() + limiter, err := cfg.CreateRateLimiter() + require.NoError(t, err) start := time.Now() end := start.Add(30 * time.Minute) @@ -67,7 +75,8 @@ func TestLimited(t *testing.T) { for _, tt := range tests { t.Run(tt.name+" at period", func(t *testing.T) { // Setup the limiter - limiter := tt.cfg.CreateRateLimiter() + limiter, err := tt.cfg.CreateRateLimiter() + require.NoError(t, err) // Compute the actual values start := time.Now().Truncate(tt.step) @@ -85,7 +94,8 @@ func TestLimited(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Setup the limiter - limiter := tt.cfg.CreateRateLimiter() + limiter, err := tt.cfg.CreateRateLimiter() + require.NoError(t, err) // Compute the actual values start := time.Now().Truncate(tt.step).Add(1 * time.Second) @@ -134,7 +144,8 @@ func TestUndo(t *testing.T) { for _, tt := range tests { t.Run(tt.name+" at period", func(t *testing.T) { // Setup the limiter - limiter := tt.cfg.CreateRateLimiter() + limiter, err := tt.cfg.CreateRateLimiter() + require.NoError(t, err) // Compute the actual values start := time.Now().Truncate(tt.step) @@ -156,7 +167,8 @@ func TestUndo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Setup the limiter - limiter := tt.cfg.CreateRateLimiter() + limiter, err := 
tt.cfg.CreateRateLimiter() + require.NoError(t, err) // Compute the actual values start := time.Now().Truncate(tt.step).Add(1 * time.Second) diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index 239e953e6fcc9..b9a78f0b00a04 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -101,6 +101,12 @@ to use them. # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## Rate limits for sending data (disabled by default) + ## Available, uncompressed payload size e.g. "5Mb" + # rate_limit = "unlimited" + ## Fixed time-window for the available payload size e.g. "5m" + # rate_limit_period = "0s" ``` ## Metrics diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 34e698dd75c94..8a622a5f4b522 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -22,7 +22,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/serializers/influx" + "github.com/influxdata/telegraf/plugins/common/ratelimiter" ) type APIError struct { @@ -59,8 +59,9 @@ type httpClient struct { pingTimeout config.Duration readIdleTimeout config.Duration tlsConfig *tls.Config - serializer *influx.Serializer encoder internal.ContentEncoder + serializer ratelimiter.Serializer + rateLimiter *ratelimiter.RateLimiter client *http.Client params url.Values retryTime time.Time @@ -160,52 +161,69 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } batches := make(map[string][]telegraf.Metric) + batchIndices := make(map[string][]int) if c.bucketTag == "" { - err := c.writeBatch(ctx, c.bucket, metrics) - if err != nil { - var apiErr *APIError - if errors.As(err, &apiErr) { - if apiErr.StatusCode == http.StatusRequestEntityTooLarge { - return 
c.splitAndWriteBatch(ctx, c.bucket, metrics) - } - } - - return err + batches[c.bucket] = metrics + batchIndices[c.bucket] = make([]int, len(metrics)) + for i := range metrics { + batchIndices[c.bucket][i] = i } } else { - for _, metric := range metrics { + for i, metric := range metrics { bucket, ok := metric.GetTag(c.bucketTag) if !ok { bucket = c.bucket - } - - if _, ok := batches[bucket]; !ok { - batches[bucket] = make([]telegraf.Metric, 0) - } - - if c.excludeBucketTag { - // Avoid modifying the metric in case we need to retry the request. + } else if c.excludeBucketTag { + // Avoid modifying the metric if we do remove the tag metric = metric.Copy() metric.Accept() metric.RemoveTag(c.bucketTag) } batches[bucket] = append(batches[bucket], metric) + batchIndices[c.bucket] = append(batchIndices[c.bucket], i) + } + } + + var wErr internal.PartialWriteError + for bucket, batch := range batches { + err := c.writeBatch(ctx, bucket, batch) + if err == nil { + wErr.MetricsAccept = append(wErr.MetricsAccept, batchIndices[bucket]...) + continue } - for bucket, batch := range batches { - err := c.writeBatch(ctx, bucket, batch) - if err != nil { - var apiErr *APIError - if errors.As(err, &apiErr) { - if apiErr.StatusCode == http.StatusRequestEntityTooLarge { - return c.splitAndWriteBatch(ctx, c.bucket, metrics) - } - } - - return err + // Check if the request was too large and split it + var apiErr *APIError + if errors.As(err, &apiErr) { + if apiErr.StatusCode == http.StatusRequestEntityTooLarge { + return c.splitAndWriteBatch(ctx, c.bucket, metrics) } + wErr.Err = err + wErr.MetricsReject = append(wErr.MetricsReject, batchIndices[bucket]...) 
+ return &wErr } + + // Check if we got a write error and if so, translate the returned + // metric indices to return the original indices in case of bucketing + var writeErr *internal.PartialWriteError + if errors.As(err, &writeErr) { + wErr.Err = writeErr.Err + for _, idx := range writeErr.MetricsAccept { + wErr.MetricsAccept = append(wErr.MetricsAccept, batchIndices[bucket][idx]) + } + for _, idx := range writeErr.MetricsReject { + wErr.MetricsReject = append(wErr.MetricsReject, batchIndices[bucket][idx]) + } + if !errors.Is(writeErr.Err, internal.ErrSizeLimitReached) { + continue + } + return &wErr + } + + // Return the error without special treatment + wErr.Err = err + return &wErr } return nil } @@ -222,11 +240,16 @@ func (c *httpClient) splitAndWriteBatch(ctx context.Context, bucket string, metr } func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error { - // Serialize the metrics - body, err := c.serializer.SerializeBatch(metrics) - if err != nil { - return err + // Get the current limit for the outbound data + ratets := time.Now() + limit := c.rateLimiter.Remaining(ratets) + + // Serialize the metrics with the remaining limit, exit early if nothing was serialized + body, werr := c.serializer.SerializeBatch(metrics, limit) + if werr != nil && !errors.Is(werr, internal.ErrSizeLimitReached) || len(body) == 0 { + return werr } + used := int64(len(body)) // Encode the content if requested if c.encoder != nil { @@ -249,6 +272,7 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te c.addHeaders(req) // Execute the request + c.rateLimiter.Accept(ratets, used) resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { internal.OnClientError(c.client, err) @@ -269,7 +293,7 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te http.StatusMultiStatus, http.StatusAlreadyReported: c.retryCount = 0 - return nil + return werr } // We got an error and now try to 
decode further @@ -294,11 +318,18 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te http.StatusBadRequest, // request was received but server refused to process it due to a semantic problem with the request. // for example, submitting metrics outside the retention period. - // Clients should *not* repeat the request and the metrics should be dropped. http.StatusUnprocessableEntity, http.StatusNotAcceptable: - c.log.Errorf("Failed to write metric to %s (will be dropped: %s): %s\n", bucket, resp.Status, desc) - return nil + + // Clients should *not* repeat the request and the metrics should be dropped. + rejected := make([]int, 0, len(metrics)) + for i := range len(metrics) { + rejected = append(rejected, i) + } + return &internal.PartialWriteError{ + Err: fmt.Errorf("failed to write metric to %s (will be dropped: %s): %s", bucket, resp.Status, desc), + MetricsReject: rejected, + } case http.StatusUnauthorized, http.StatusForbidden: return fmt.Errorf("failed to write metric to %s (%s): %s", bucket, resp.Status, desc) case http.StatusTooManyRequests, @@ -316,8 +347,14 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te // if it's any other 4xx code, the client should not retry as it's the client's mistake. // retrying will not make the request magically work. if len(resp.Status) > 0 && resp.Status[0] == '4' { - c.log.Errorf("Failed to write metric to %s (will be dropped: %s): %s\n", bucket, resp.Status, desc) - return nil + rejected := make([]int, 0, len(metrics)) + for i := range len(metrics) { + rejected = append(rejected, i) + } + return &internal.PartialWriteError{ + Err: fmt.Errorf("failed to write metric to %s (will be dropped: %s): %s", bucket, resp.Status, desc), + MetricsReject: rejected, + } } // This is only until platform spec is fully implemented. 
As of the diff --git a/plugins/outputs/influxdb_v2/influxdb_v2.go b/plugins/outputs/influxdb_v2/influxdb_v2.go index 15a66632788e2..89b0d8d2f875a 100644 --- a/plugins/outputs/influxdb_v2/influxdb_v2.go +++ b/plugins/outputs/influxdb_v2/influxdb_v2.go @@ -17,6 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/ratelimiter" commontls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/influx" @@ -44,10 +45,11 @@ type InfluxDB struct { ReadIdleTimeout config.Duration `toml:"read_idle_timeout"` Log telegraf.Logger `toml:"-"` commontls.ClientConfig + ratelimiter.RateLimitConfig clients []*httpClient encoder internal.ContentEncoder - serializer *influx.Serializer + serializer ratelimiter.Serializer tlsCfg *tls.Config } @@ -65,7 +67,7 @@ func (i *InfluxDB) Init() error { i.URLs = append(i.URLs, "http://localhost:8086") } - // Check options + // Init encoding if configured switch i.ContentEncoding { case "", "gzip": i.ContentEncoding = "gzip" @@ -80,13 +82,14 @@ func (i *InfluxDB) Init() error { } // Setup the limited serializer - i.serializer = &influx.Serializer{ + serializer := &influx.Serializer{ UintSupport: i.UintSupport, OmitTimestamp: i.OmitTimestamp, } - if err := i.serializer.Init(); err != nil { + if err := serializer.Init(); err != nil { return fmt.Errorf("setting up serializer failed: %w", err) } + i.serializer = ratelimiter.NewIndividualSerializer(serializer) // Setup the client config tlsCfg, err := i.ClientConfig.TLSConfig() @@ -142,6 +145,10 @@ func (i *InfluxDB) Connect() error { switch parts.Scheme { case "http", "https", "unix": + limiter, err := i.RateLimitConfig.CreateRateLimiter() + if err != nil { + return err + } c := &httpClient{ url: parts, localAddr: localAddr, @@ -158,8 +165,9 @@ func (i *InfluxDB) Connect() error { 
tlsConfig: i.tlsCfg, pingTimeout: i.PingTimeout, readIdleTimeout: i.ReadIdleTimeout, - serializer: i.serializer, encoder: i.encoder, + rateLimiter: limiter, + serializer: i.serializer, log: i.Log, } @@ -191,6 +199,10 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { for _, n := range rand.Perm(len(i.clients)) { client := i.clients[n] if err := client.Write(ctx, metrics); err != nil { + var werr *internal.PartialWriteError + if errors.As(err, &werr) || errors.Is(err, internal.ErrSizeLimitReached) { + return err + } i.Log.Errorf("When writing to [%s]: %v", client.url, err) continue } diff --git a/plugins/outputs/influxdb_v2/influxdb_v2_test.go b/plugins/outputs/influxdb_v2/influxdb_v2_test.go index 36c3c3b08e0d9..f93617a38744e 100644 --- a/plugins/outputs/influxdb_v2/influxdb_v2_test.go +++ b/plugins/outputs/influxdb_v2/influxdb_v2_test.go @@ -7,6 +7,7 @@ import ( "net/http/httptest" "reflect" "strings" + "sync/atomic" "testing" "time" @@ -14,7 +15,9 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/common/ratelimiter" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" @@ -373,3 +376,113 @@ func TestTooLargeWriteRetry(t *testing.T) { } require.Error(t, plugin.Write(hugeMetrics)) } + +func TestRateLimit(t *testing.T) { + // Setup a test server + var received atomic.Uint64 + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v2/write": + if err := r.ParseForm(); err != nil { + w.WriteHeader(http.StatusUnprocessableEntity) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusUnprocessableEntity) + return + } + received.Add(uint64(len(body))) + + 
w.WriteHeader(http.StatusNoContent) + + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }), + ) + defer ts.Close() + + // Setup plugin and connect + plugin := &influxdb.InfluxDB{ + URLs: []string{"http://" + ts.Listener.Addr().String()}, + Bucket: "telegraf", + ContentEncoding: "identity", + RateLimitConfig: ratelimiter.RateLimitConfig{ + Limit: 50, + Period: config.Duration(time.Second), + }, + Log: &testutil.Logger{}, + } + require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Connect()) + defer plugin.Close() + + // Together the metric batch size is too big, split up, we get success + metrics := []telegraf.Metric{ + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 1), + ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 99.0, + }, + time.Unix(0, 2), + ), + metric.New( + "operating_hours", + map[string]string{ + "machine": "A", + }, + map[string]interface{}{ + "value": 123.456, + }, + time.Unix(0, 3), + ), + metric.New( + "status", + map[string]string{ + "machine": "B", + }, + map[string]interface{}{ + "temp": 48.235, + "remaining": 999.999, + }, + time.Unix(0, 4), + ), + } + + // Write the metrics the first time. Only the first two metrics should be + // received by the server due to the rate limit. 
+ require.ErrorIs(t, plugin.Write(metrics), internal.ErrSizeLimitReached) + require.LessOrEqual(t, received.Load(), uint64(30)) + + // A direct follow-up write attempt with the remaining metrics should fail + // due to the rate limit being reached + require.ErrorIs(t, plugin.Write(metrics[2:]), internal.ErrSizeLimitReached) + require.LessOrEqual(t, received.Load(), uint64(30)) + + // Wait for at least the period (plus some safety margin) to write the third metric + time.Sleep(time.Duration(plugin.RateLimitConfig.Period) + 100*time.Millisecond) + require.ErrorIs(t, plugin.Write(metrics[2:]), internal.ErrSizeLimitReached) + require.Greater(t, received.Load(), uint64(30)) + require.LessOrEqual(t, received.Load(), uint64(72)) + + // Wait again for the period for at least the period (plus some safety margin) + // to write the last metric. This should finally succeed as all metrics + // are written. + time.Sleep(time.Duration(plugin.RateLimitConfig.Period) + 100*time.Millisecond) + require.NoError(t, plugin.Write(metrics[3:])) + require.Equal(t, uint64(121), received.Load()) +} diff --git a/plugins/outputs/influxdb_v2/sample.conf b/plugins/outputs/influxdb_v2/sample.conf index 5fc41a6613686..e5de679fcd7fb 100644 --- a/plugins/outputs/influxdb_v2/sample.conf +++ b/plugins/outputs/influxdb_v2/sample.conf @@ -71,3 +71,9 @@ # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## Rate limits for sending data (disabled by default) + ## Available, uncompressed payload size e.g. "5Mb" + # rate_limit = "unlimited" + ## Fixed time-window for the available payload size e.g. 
"5m" + # rate_limit_period = "0s" From 5bb45983f8d2ba5d89c942dc913d6a735738e0f3 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:50:49 +0100 Subject: [PATCH 128/170] chore: Update go to v1.23.4 (#16265) --- .circleci/config.yml | 2 +- .github/workflows/readme-linter.yml | 2 +- scripts/ci.docker | 2 +- scripts/installgo_linux.sh | 4 ++-- scripts/installgo_mac.sh | 6 +++--- scripts/installgo_windows.sh | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 20568a9c4284f..3ca65eca9e3c6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,7 +8,7 @@ executors: working_directory: '/go/src/github.com/influxdata/telegraf' resource_class: large docker: - - image: 'quay.io/influxdb/telegraf-ci:1.23.3' + - image: 'quay.io/influxdb/telegraf-ci:1.23.4' environment: GOFLAGS: -p=4 mac: diff --git a/.github/workflows/readme-linter.yml b/.github/workflows/readme-linter.yml index e8bca6d257e49..a23a1fccaab9b 100644 --- a/.github/workflows/readme-linter.yml +++ b/.github/workflows/readme-linter.yml @@ -11,7 +11,7 @@ jobs: steps: - uses: actions/setup-go@v5 with: - go-version: '1.23.3' + go-version: '1.23.4' - uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/scripts/ci.docker b/scripts/ci.docker index 65eed0d0af29a..b50f161b4ee5b 100644 --- a/scripts/ci.docker +++ b/scripts/ci.docker @@ -1,4 +1,4 @@ -FROM golang:1.23.3 +FROM golang:1.23.4 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/installgo_linux.sh b/scripts/installgo_linux.sh index e792f11cf39c4..a047d679c8bc9 100644 --- a/scripts/installgo_linux.sh +++ b/scripts/installgo_linux.sh @@ -2,10 +2,10 @@ set -eux -GO_VERSION="1.23.3" +GO_VERSION="1.23.4" GO_ARCH="linux-amd64" # from https://go.dev/dl -GO_VERSION_SHA="a0afb9744c00648bafb1b90b4aba5bdb86f424f02f9275399ce0c20b93a2c3a8" +GO_VERSION_SHA="6924efde5de86fe277676e929dc9917d466efa02fb934197bc2eba35d5680971" # Download Go and 
verify Go tarball setup_go () { diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh index 076a53223b7bd..50d6909418ed0 100644 --- a/scripts/installgo_mac.sh +++ b/scripts/installgo_mac.sh @@ -3,9 +3,9 @@ set -eux ARCH=$(uname -m) -GO_VERSION="1.23.3" -GO_VERSION_SHA_arm64="31e119fe9bde6e105407a32558d5b5fa6ca11e2bd17f8b7b2f8a06aba16a0632" # from https://go.dev/dl -GO_VERSION_SHA_amd64="c7e024d5c0bc81845070f23598caf02f05b8ae88fd4ad2cd3e236ddbea833ad2" # from https://go.dev/dl +GO_VERSION="1.23.4" +GO_VERSION_SHA_arm64="87d2bb0ad4fe24d2a0685a55df321e0efe4296419a9b3de03369dbe60b8acd3a" # from https://go.dev/dl +GO_VERSION_SHA_amd64="6700067389a53a1607d30aa8d6e01d198230397029faa0b109e89bc871ab5a0e" # from https://go.dev/dl if [ "$ARCH" = 'arm64' ]; then GO_ARCH="darwin-arm64" diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh index 15d82bc2eefbb..05d2632449d8e 100644 --- a/scripts/installgo_windows.sh +++ b/scripts/installgo_windows.sh @@ -2,7 +2,7 @@ set -eux -GO_VERSION="1.23.3" +GO_VERSION="1.23.4" setup_go () { choco upgrade golang --allow-downgrade --version=${GO_VERSION} From 993a99eaf7e667145e54430597a69d6d0a228ff3 Mon Sep 17 00:00:00 2001 From: tomas-quix <78492422+tomas-quix@users.noreply.github.com> Date: Fri, 6 Dec 2024 22:07:25 +0100 Subject: [PATCH 129/170] feat(outputs.quix): Add plugin (#16144) Co-authored-by: stereosky Co-authored-by: Sven Rebhan --- plugins/outputs/all/quix.go | 5 + plugins/outputs/quix/README.md | 58 ++++++++++ plugins/outputs/quix/config.go | 81 ++++++++++++++ plugins/outputs/quix/quix.go | 169 ++++++++++++++++++++++++++++ plugins/outputs/quix/quix_test.go | 180 ++++++++++++++++++++++++++++++ plugins/outputs/quix/sample.conf | 14 +++ 6 files changed, 507 insertions(+) create mode 100644 plugins/outputs/all/quix.go create mode 100644 plugins/outputs/quix/README.md create mode 100644 plugins/outputs/quix/config.go create mode 100644 plugins/outputs/quix/quix.go create mode 100644 
plugins/outputs/quix/quix_test.go create mode 100644 plugins/outputs/quix/sample.conf diff --git a/plugins/outputs/all/quix.go b/plugins/outputs/all/quix.go new file mode 100644 index 0000000000000..97f559634a497 --- /dev/null +++ b/plugins/outputs/all/quix.go @@ -0,0 +1,5 @@ +//go:build !custom || outputs || outputs.quix + +package all + +import _ "github.com/influxdata/telegraf/plugins/outputs/quix" // register plugin diff --git a/plugins/outputs/quix/README.md b/plugins/outputs/quix/README.md new file mode 100644 index 0000000000000..870f7e36d4f70 --- /dev/null +++ b/plugins/outputs/quix/README.md @@ -0,0 +1,58 @@ +# Quix Output Plugin + +This plugin writes metrics to a [Quix][quix] endpoint. + +Please consult Quix's [official documentation][docs] for more details on the +Quix platform architecture and concepts. + +⭐ Telegraf v1.33.0 +🏷️ cloud, messaging +💻 all + +[quix]: https://quix.io +[docs]: https://quix.io/docs/ + +## Global configuration options + +In addition to the plugin-specific configuration settings, plugins support +additional global and plugin configuration settings. These settings are used to +modify metrics, tags, and field or create aliases and configure ordering, etc. +See the [CONFIGURATION.md][CONFIGURATION.md] for more details. + +[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins + +## Secret-store support + +This plugin supports secrets from secret-stores for the `token` option. +See the [secret-store documentation][SECRETSTORE] for more details on how +to use them. 
+ +[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets + +## Configuration + +```toml @sample.conf +# Send metrics to a Quix data processing pipeline +[[outputs.quix]] + ## Endpoint for providing the configuration + # url = "https://portal-api.platform.quix.io" + + ## Workspace and topics to send the metrics to + workspace = "your_workspace" + topic = "your_topic" + + ## Authentication token created in Quix + token = "your_auth_token" + + ## Amount of time allowed to complete the HTTP request for fetching the config + # timeout = "5s" +``` + +The plugin requires a [SDK token][token] for authentication with Quix. You can +generate the `token` in settings under the `API and tokens` section. + +Furthermore, the `workspace` parameter must be set to the `Workspace ID` or the +`Environment ID` of your Quix project. Those values can be found in settings +under the `General settings` section. + +[token]: https://quix.io/docs/develop/authentication/personal-access-token.html diff --git a/plugins/outputs/quix/config.go b/plugins/outputs/quix/config.go new file mode 100644 index 0000000000000..ed36f04ad1ba3 --- /dev/null +++ b/plugins/outputs/quix/config.go @@ -0,0 +1,81 @@ +package quix + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" +) + +type brokerConfig struct { + BootstrapServers string `json:"bootstrap.servers"` + SaslMechanism string `json:"sasl.mechanism"` + SaslUsername string `json:"sasl.username"` + SaslPassword string `json:"sasl.password"` + SecurityProtocol string `json:"security.protocol"` + SSLCertBase64 string `json:"ssl.ca.cert"` + + cert []byte +} + +func (q *Quix) fetchBrokerConfig() (*brokerConfig, error) { + // Create request + endpoint := fmt.Sprintf("%s/workspaces/%s/broker/librdkafka", q.APIURL, q.Workspace) + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, fmt.Errorf("creating request failed: %w", err) + } + + // Setup authentication + token, err := 
q.Token.Get() + if err != nil { + return nil, fmt.Errorf("getting token failed: %w", err) + } + req.Header.Set("Authorization", "Bearer "+token.String()) + req.Header.Set("Accept", "application/json") + token.Destroy() + + // Query the broker configuration from the Quix API + client, err := q.HTTPClientConfig.CreateClient(context.Background(), q.Log) + if err != nil { + return nil, fmt.Errorf("creating client failed: %w", err) + } + defer client.CloseIdleConnections() + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("executing request failed: %w", err) + } + defer resp.Body.Close() + + // Read the body as we need it both in case of an error as well as for + // decoding the config in case of success + body, err := io.ReadAll(resp.Body) + if err != nil { + q.Log.Errorf("Reading message body failed: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected response %q (%d): %s", + http.StatusText(resp.StatusCode), + resp.StatusCode, + string(body), + ) + } + + // Decode the broker and the returned certificate + var cfg brokerConfig + if err := json.Unmarshal(body, &cfg); err != nil { + return nil, fmt.Errorf("decoding body failed: %w", err) + } + + cert, err := base64.StdEncoding.DecodeString(cfg.SSLCertBase64) + if err != nil { + return nil, fmt.Errorf("decoding certificate failed: %w", err) + } + cfg.cert = cert + + return &cfg, nil +} diff --git a/plugins/outputs/quix/quix.go b/plugins/outputs/quix/quix.go new file mode 100644 index 0000000000000..4ecd43a4f9aee --- /dev/null +++ b/plugins/outputs/quix/quix.go @@ -0,0 +1,169 @@ +//go:generate ../../../tools/readme_config_includer/generator +package quix + +import ( + "crypto/tls" + "crypto/x509" + _ "embed" + "errors" + "fmt" + "strings" + "time" + + "github.com/IBM/sarama" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + common_http "github.com/influxdata/telegraf/plugins/common/http" + common_kafka 
"github.com/influxdata/telegraf/plugins/common/kafka" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/plugins/serializers/json" +) + +//go:embed sample.conf +var sampleConfig string + +type Quix struct { + APIURL string `toml:"url"` + Workspace string `toml:"workspace"` + Topic string `toml:"topic"` + Token config.Secret `toml:"token"` + Log telegraf.Logger `toml:"-"` + common_http.HTTPClientConfig + + producer sarama.SyncProducer + serializer serializers.Serializer + kakfaTopic string +} + +func (*Quix) SampleConfig() string { + return sampleConfig +} + +func (q *Quix) Init() error { + // Set defaults + if q.APIURL == "" { + q.APIURL = "https://portal-api.platform.quix.io" + } + q.APIURL = strings.TrimSuffix(q.APIURL, "/") + + // Check input parameters + if q.Topic == "" { + return errors.New("option 'topic' must be set") + } + if q.Workspace == "" { + return errors.New("option 'workspace' must be set") + } + if q.Token.Empty() { + return errors.New("option 'token' must be set") + } + q.kakfaTopic = q.Workspace + "-" + q.Topic + + // Create a JSON serializer for the output + q.serializer = &json.Serializer{ + TimestampUnits: config.Duration(time.Nanosecond), // Hardcoded nanoseconds precision + } + + return nil +} + +func (q *Quix) Connect() error { + // Fetch the Kafka broker configuration from the Quix HTTP endpoint + quixConfig, err := q.fetchBrokerConfig() + if err != nil { + return fmt.Errorf("fetching broker config failed: %w", err) + } + brokers := strings.Split(quixConfig.BootstrapServers, ",") + if len(brokers) == 0 { + return errors.New("no brokers received") + } + + // Setup the Kakfa producer config + cfg := sarama.NewConfig() + cfg.Producer.Return.Successes = true + + switch quixConfig.SecurityProtocol { + case "SASL_SSL": + cfg.Net.SASL.Enable = true + cfg.Net.SASL.User = quixConfig.SaslUsername + cfg.Net.SASL.Password = quixConfig.SaslPassword + 
cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256 + cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &common_kafka.XDGSCRAMClient{HashGeneratorFcn: common_kafka.SHA256} + } + + switch quixConfig.SaslMechanism { + case "SCRAM-SHA-512": + cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &common_kafka.XDGSCRAMClient{HashGeneratorFcn: common_kafka.SHA512} + } + cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 + case "SCRAM-SHA-256": + cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256 + cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &common_kafka.XDGSCRAMClient{HashGeneratorFcn: common_kafka.SHA256} + } + case "PLAIN": + cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext + default: + return fmt.Errorf("unsupported SASL mechanism: %s", quixConfig.SaslMechanism) + } + + // Certificate + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(quixConfig.cert) { + return errors.New("appending CA cert to pool failed") + } + cfg.Net.TLS.Enable = true + cfg.Net.TLS.Config = &tls.Config{RootCAs: certPool} + case "PLAINTEXT": + // No additional configuration required for plaintext communication + default: + return fmt.Errorf("unsupported security protocol: %s", quixConfig.SecurityProtocol) + } + + // Setup the Kakfa producer itself + producer, err := sarama.NewSyncProducer(brokers, cfg) + if err != nil { + return fmt.Errorf("creating producer failed: %w", err) + } + q.producer = producer + + return nil +} + +func (q *Quix) Write(metrics []telegraf.Metric) error { + for _, m := range metrics { + serialized, err := q.serializer.Serialize(m) + if err != nil { + q.Log.Errorf("Error serializing metric: %v", err) + continue + } + + msg := &sarama.ProducerMessage{ + Topic: q.kakfaTopic, + Value: sarama.ByteEncoder(serialized), + Timestamp: m.Time(), + Key: sarama.StringEncoder("telegraf"), + } + + if _, _, err = q.producer.SendMessage(msg); err != nil { + q.Log.Errorf("Error sending 
message to Kafka: %v", err) + continue + } + } + + return nil +} + +func (q *Quix) Close() error { + if q.producer != nil { + return q.producer.Close() + } + return nil +} + +func init() { + outputs.Add("quix", func() telegraf.Output { return &Quix{} }) +} diff --git a/plugins/outputs/quix/quix_test.go b/plugins/outputs/quix/quix_test.go new file mode 100644 index 0000000000000..00726b3e6ff26 --- /dev/null +++ b/plugins/outputs/quix/quix_test.go @@ -0,0 +1,180 @@ +package quix + +import ( + "context" + "crypto/rand" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + kafkacontainer "github.com/testcontainers/testcontainers-go/modules/kafka" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +func TestMissingTopic(t *testing.T) { + plugin := &Quix{} + require.ErrorContains(t, plugin.Init(), "option 'topic' must be set") +} + +func TestMissingWorkspace(t *testing.T) { + plugin := &Quix{Topic: "foo"} + require.ErrorContains(t, plugin.Init(), "option 'workspace' must be set") +} + +func TestMissingToken(t *testing.T) { + plugin := &Quix{Topic: "foo", Workspace: "bar"} + require.ErrorContains(t, plugin.Init(), "option 'token' must be set") +} + +func TestDefaultURL(t *testing.T) { + plugin := &Quix{ + Topic: "foo", + Workspace: "bar", + Token: config.NewSecret([]byte("secret")), + } + require.NoError(t, plugin.Init()) + require.Equal(t, "https://portal-api.platform.quix.io", plugin.APIURL) +} + +func TestFetchingConfig(t *testing.T) { + // Setup HTTP test-server for providing the broker config + brokerCfg := []byte(` + { + "bootstrap.servers":"servers", + "sasl.mechanism":"mechanism", + "sasl.username":"user", + "sasl.password":"password", + "security.protocol":"protocol", + "ssl.ca.cert":"Y2VydA==" + } + `) + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path 
!= "/workspaces/bar/broker/librdkafka" { + w.WriteHeader(http.StatusNotFound) + return + } + if r.Header.Get("Authorization") != "Bearer bXkgc2VjcmV0" { + w.WriteHeader(http.StatusUnauthorized) + return + } + if r.Header.Get("Accept") != "application/json" { + w.WriteHeader(http.StatusUnsupportedMediaType) + return + } + if _, err := w.Write(brokerCfg); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } + }), + ) + defer server.Close() + + // Setup the plugin and fetch the config + plugin := &Quix{ + APIURL: server.URL, + Topic: "foo", + Workspace: "bar", + Token: config.NewSecret([]byte("bXkgc2VjcmV0")), + } + require.NoError(t, plugin.Init()) + + // Check the config + expected := &brokerConfig{ + BootstrapServers: "servers", + SaslMechanism: "mechanism", + SaslUsername: "user", + SaslPassword: "password", + SecurityProtocol: "protocol", + SSLCertBase64: "Y2VydA==", + cert: []byte("cert"), + } + cfg, err := plugin.fetchBrokerConfig() + require.NoError(t, err) + require.Equal(t, expected, cfg) +} + +func TestConnectAndWriteIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + // Setup common config params + workspace := "test" + topic := "telegraf" + + // Setup a kafka container + ctx := context.Background() + kafkaContainer, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0") + require.NoError(t, err) + defer kafkaContainer.Terminate(ctx) //nolint:errcheck // ignored + + brokers, err := kafkaContainer.Brokers(ctx) + require.NoError(t, err) + + // Setup broker config distributed via HTTP + brokerCfg := &brokerConfig{ + BootstrapServers: strings.Join(brokers, ","), + SecurityProtocol: "PLAINTEXT", + } + response, err := json.Marshal(brokerCfg) + require.NoError(t, err) + + // Setup authentication + signingKey := make([]byte, 64) + _, err = rand.Read(signingKey) + require.NoError(t, err) + + tokenRaw := jwt.NewWithClaims(jwt.SigningMethodHS256, 
&jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Minute)), + Issuer: "quix test", + }) + token, err := tokenRaw.SignedString(signingKey) + require.NoError(t, err) + + // Setup HTTP test-server for providing the broker config + path := "/workspaces/" + workspace + "/broker/librdkafka" + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != path { + w.WriteHeader(http.StatusNotFound) + t.Logf("invalid path %q", r.URL.Path) + return + } + if r.Header.Get("Authorization") != "Bearer "+token { + w.WriteHeader(http.StatusUnauthorized) + return + } + if r.Header.Get("Accept") != "application/json" { + w.WriteHeader(http.StatusUnsupportedMediaType) + return + } + if _, err := w.Write(response); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } + }), + ) + defer server.Close() + + // Setup the plugin and establish connection + plugin := &Quix{ + APIURL: server.URL, + Workspace: workspace, + Topic: topic, + Token: config.NewSecret([]byte(token)), + } + require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Connect()) + defer plugin.Close() + + // Verify that we can successfully write data to the kafka broker + require.NoError(t, plugin.Write(testutil.MockMetrics())) +} diff --git a/plugins/outputs/quix/sample.conf b/plugins/outputs/quix/sample.conf new file mode 100644 index 0000000000000..4196b24cc9370 --- /dev/null +++ b/plugins/outputs/quix/sample.conf @@ -0,0 +1,14 @@ +# Send metrics to a Quix data processing pipeline +[[outputs.quix]] + ## Endpoint for providing the configuration + # url = "https://portal-api.platform.quix.io" + + ## Workspace and topics to send the metrics to + workspace = "your_workspace" + topic = "your_topic" + + ## Authentication token created in Quix + token = "your_auth_token" + + ## Amount of time allowed to complete the HTTP request for fetching the config + # timeout = "5s" \ No newline at end of file From 
8f99423530757c24cb77945dddeb3157e9214a44 Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Mon, 9 Dec 2024 15:52:22 +0100 Subject: [PATCH 130/170] fix(outputs.quix): Replace deprecated serializer type --- plugins/outputs/quix/quix.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/outputs/quix/quix.go b/plugins/outputs/quix/quix.go index 4ecd43a4f9aee..ddfc402e46a66 100644 --- a/plugins/outputs/quix/quix.go +++ b/plugins/outputs/quix/quix.go @@ -17,7 +17,6 @@ import ( common_http "github.com/influxdata/telegraf/plugins/common/http" common_kafka "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/json" ) @@ -33,7 +32,7 @@ type Quix struct { common_http.HTTPClientConfig producer sarama.SyncProducer - serializer serializers.Serializer + serializer telegraf.Serializer kakfaTopic string } From 1dea1661284515270bf82e6169884935e2aa28e7 Mon Sep 17 00:00:00 2001 From: Mingyang Zheng Date: Mon, 9 Dec 2024 07:32:34 -0800 Subject: [PATCH 131/170] fix(logging): Fix deplicated prefix+attrMsg in log message when redirectLogger is used (#16274) --- logger/handler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logger/handler.go b/logger/handler.go index 76bf64a9c32d8..f967ba2561a96 100644 --- a/logger/handler.go +++ b/logger/handler.go @@ -122,7 +122,7 @@ func (l *redirectLogger) Print(level telegraf.LogLevel, ts time.Time, prefix str attrMsg = "(" + strings.Join(parts, ",") + ")" } - msg := []interface{}{ts.In(time.UTC).Format(time.RFC3339), level.Indicator(), prefix + attrMsg} + msg := []interface{}{ts.In(time.UTC).Format(time.RFC3339), level.Indicator()} if prefix+attrMsg != "" { msg = append(msg, prefix+attrMsg) } From bd067608972ccf1934e77486d920d0f0e54f6241 Mon Sep 17 00:00:00 2001 From: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> Date: Mon, 9 Dec 2024 
12:20:25 -0600 Subject: [PATCH 132/170] chore(inputs.prometheus): Improve label and field selector logging (#16228) --- plugins/inputs/prometheus/prometheus.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 1922407fd754e..191d27dd29a58 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -183,7 +183,12 @@ func (p *Prometheus) Init() error { return fmt.Errorf("the field selector %q is not supported for pods", invalidSelector) } - p.Log.Infof("Using the label selector: %v and field selector: %v", p.podLabelSelector, p.podFieldSelector) + if p.KubernetesLabelSelector != "" { + p.Log.Debugf("Using the label selector: %v", p.podLabelSelector) + } + if p.KubernetesFieldSelector != "" { + p.Log.Debugf("Using the field selector: %v", p.podFieldSelector) + } for k, vs := range p.NamespaceAnnotationPass { tagFilter := models.TagFilter{} From d4ff7d5f5d7fcc3b52f0ca77f63427b7269f2dae Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Mon, 9 Dec 2024 19:32:49 +0100 Subject: [PATCH 133/170] Update build version to 1.34.0 --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index f0fed9186c9ff..2404d9590220e 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.33.0 \ No newline at end of file +1.34.0 \ No newline at end of file From 12946b889757b1a20bbc58f05971435ae7f36b71 Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Mon, 9 Dec 2024 19:38:55 +0100 Subject: [PATCH 134/170] Update changelog for v1.33.0 (cherry picked from commit 971e9e2631f2f8073a3c5b76225068f66eca81d5) --- CHANGELOG.md | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ceae796d6ef68..a022cbf4d0dac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,66 @@ # Changelog +## 
v1.33.0 [2024-12-09] + +### New Plugins + +- [#15754](https://github.com/influxdata/telegraf/pull/15754) `inputs.neoom_beaam` Add new plugin +- [#15869](https://github.com/influxdata/telegraf/pull/15869) `processors.batch` Add batch processor +- [#16144](https://github.com/influxdata/telegraf/pull/16144) `outputs.quix` Add plugin + +### Features + +- [#16010](https://github.com/influxdata/telegraf/pull/16010) `agent` Add --watch-interval option for polling config changes +- [#15948](https://github.com/influxdata/telegraf/pull/15948) `aggregators.basicstats` Add first field +- [#15891](https://github.com/influxdata/telegraf/pull/15891) `common.socket` Allow parallel parsing with a pool of workers +- [#16141](https://github.com/influxdata/telegraf/pull/16141) `inputs.amqp_consumer` Allow specification of queue arguments +- [#15950](https://github.com/influxdata/telegraf/pull/15950) `inputs.diskio` Add field io await and util +- [#15919](https://github.com/influxdata/telegraf/pull/15919) `inputs.kafka_consumer` Implement startup error behavior options +- [#15910](https://github.com/influxdata/telegraf/pull/15910) `inputs.memcached` Add support for external-store metrics +- [#15990](https://github.com/influxdata/telegraf/pull/15990) `inputs.mock` Add sine phase +- [#16040](https://github.com/influxdata/telegraf/pull/16040) `inputs.modbus` Allow grouping across register types +- [#15865](https://github.com/influxdata/telegraf/pull/15865) `inputs.prometheus` Allow to use secrets for credentials +- [#16230](https://github.com/influxdata/telegraf/pull/16230) `inputs.smart` Add Power on Hours and Cycle Count +- [#15935](https://github.com/influxdata/telegraf/pull/15935) `inputs.snmp` Add displayhint conversion +- [#16027](https://github.com/influxdata/telegraf/pull/16027) `inputs.snmp` Convert uneven bytes to int +- [#15976](https://github.com/influxdata/telegraf/pull/15976) `inputs.socket_listener` Use reception time as timestamp +- 
[#15853](https://github.com/influxdata/telegraf/pull/15853) `inputs.statsd` Allow reporting sets and timings count as floats +- [#11591](https://github.com/influxdata/telegraf/pull/11591) `inputs.vsphere` Add VM memory configuration +- [#16109](https://github.com/influxdata/telegraf/pull/16109) `inputs.vsphere` Add cpu temperature field +- [#15917](https://github.com/influxdata/telegraf/pull/15917) `inputs` Add option to choose the metric time source +- [#16242](https://github.com/influxdata/telegraf/pull/16242) `logging` Allow overriding message key for structured logging +- [#15742](https://github.com/influxdata/telegraf/pull/15742) `outputs.influxdb_v2` Add rate limit implementation +- [#15943](https://github.com/influxdata/telegraf/pull/15943) `outputs.mqtt` Add sprig functions for topic name generator +- [#16041](https://github.com/influxdata/telegraf/pull/16041) `outputs.postgresql` Allow limiting of column name length +- [#16258](https://github.com/influxdata/telegraf/pull/16258) `outputs` Add rate-limiting infrastructure +- [#16146](https://github.com/influxdata/telegraf/pull/16146) `outputs` Implement partial write errors +- [#15883](https://github.com/influxdata/telegraf/pull/15883) `outputs` Only copy metric if its not filtered out +- [#15893](https://github.com/influxdata/telegraf/pull/15893) `serializers.prometheusremotewrite` Log metric conversion errors + +### Bugfixes + +- [#16248](https://github.com/influxdata/telegraf/pull/16248) `inputs.netflow` Decode flags in TCP and IP headers correctly +- [#16257](https://github.com/influxdata/telegraf/pull/16257) `inputs.procstat` Handle running processes correctly across multiple filters +- [#16219](https://github.com/influxdata/telegraf/pull/16219) `logging` Add Close() func for redirectLogger +- [#16255](https://github.com/influxdata/telegraf/pull/16255) `logging` Clean up extra empty spaces when redirectLogger is used +- [#16274](https://github.com/influxdata/telegraf/pull/16274) `logging` Fix duplicated 
prefix and attrMsg in log message when redirectLogger is used + +### Dependency Updates + +- [#16232](https://github.com/influxdata/telegraf/pull/16232) `deps` Bump cloud.google.com/go/bigquery from 1.63.1 to 1.64.0 +- [#16235](https://github.com/influxdata/telegraf/pull/16235) `deps` Bump cloud.google.com/go/storage from 1.43.0 to 1.47.0 +- [#16198](https://github.com/influxdata/telegraf/pull/16198) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.42.2 to 1.43.1 +- [#16234](https://github.com/influxdata/telegraf/pull/16234) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.29.3 to 1.32.6 +- [#16201](https://github.com/influxdata/telegraf/pull/16201) `deps` Bump github.com/intel/powertelemetry from 1.0.1 to 1.0.2 +- [#16200](https://github.com/influxdata/telegraf/pull/16200) `deps` Bump github.com/rclone/rclone from 1.68.1 to 1.68.2 +- [#16199](https://github.com/influxdata/telegraf/pull/16199) `deps` Bump github.com/vishvananda/netns from 0.0.4 to 0.0.5 +- [#16236](https://github.com/influxdata/telegraf/pull/16236) `deps` Bump golang.org/x/net from 0.30.0 to 0.31.0 +- [#16250](https://github.com/influxdata/telegraf/pull/16250) `deps` Bump golangci-lint from v1.62.0 to v1.62.2 +- [#16233](https://github.com/influxdata/telegraf/pull/16233) `deps` Bump google.golang.org/grpc from 1.67.1 to 1.68.0 +- [#16202](https://github.com/influxdata/telegraf/pull/16202) `deps` Bump modernc.org/sqlite from 1.33.1 to 1.34.1 +- [#16203](https://github.com/influxdata/telegraf/pull/16203) `deps` Bump super-linter/super-linter from 7.1.0 to 7.2.0 + ## v1.32.3 [2024-11-18] ### Important Changes From 119329c9d1b13874ac384e096e0ec3162cd82eb4 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Tue, 10 Dec 2024 11:22:30 +0800 Subject: [PATCH 135/170] feat(outputs): Fix go mod error --- go.sum | 8 -------- 1 file changed, 8 deletions(-) diff --git a/go.sum b/go.sum index 8b87a9e19d693..b9102cd078595 100644 --- a/go.sum +++ b/go.sum @@ -2561,8 +2561,6 @@ 
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2718,8 +2716,6 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2908,8 +2904,6 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2930,8 +2924,6 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From edaed1cca926d6e9a3d94eff2aed749ac6863af9 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Tue, 10 Dec 2024 11:35:03 +0800 Subject: [PATCH 136/170] feat(outputs): Fix serializer type error --- plugins/outputs/inlong/inlong.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index 004e1db835cd0..eb315eee77cb1 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -10,7 +10,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" ) //go:embed sample.conf @@ -26,14 +25,14 @@ type Inlong struct { producerFunc func(groupId string, managerUrl string) (dataproxy.Client, error) producer dataproxy.Client - serializer serializers.Serializer + serializer 
telegraf.Serializer } func (i *Inlong) SampleConfig() string { return sampleConfig } -func (i *Inlong) SetSerializer(serializer serializers.Serializer) { +func (i *Inlong) SetSerializer(serializer telegraf.Serializer) { i.serializer = serializer } From ef0b969fb258565ca92dc1836c7e57df2f4b34c7 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Thu, 12 Dec 2024 10:52:25 +0800 Subject: [PATCH 137/170] Update plugins/outputs/inlong/README.md Co-authored-by: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> --- plugins/outputs/inlong/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/outputs/inlong/README.md b/plugins/outputs/inlong/README.md index a782875d2fda6..0467f36c4e4f8 100644 --- a/plugins/outputs/inlong/README.md +++ b/plugins/outputs/inlong/README.md @@ -31,7 +31,6 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - ## Suggest using CSV format here, as Inlong is also processed in CSV format data_format = "csv" ## The delimiter used when serializing data in CSV format needs to be consistent with the delimiter From 33680b99fe7f003fec1d90cc1667460d791fa2e5 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 12 Dec 2024 11:03:35 +0800 Subject: [PATCH 138/170] feat(outputs): Modify code based on comments --- plugins/outputs/inlong/inlong.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/inlong/inlong.go b/plugins/outputs/inlong/inlong.go index eb315eee77cb1..3817bb9e2d04e 100644 --- a/plugins/outputs/inlong/inlong.go +++ b/plugins/outputs/inlong/inlong.go @@ -15,7 +15,7 @@ import ( //go:embed sample.conf var sampleConfig string -const ManagerURLSuffix = "/inlong/manager/openapi/dataproxy/getIpList" +const managerURLSuffix = "/inlong/manager/openapi/dataproxy/getIpList" type Inlong struct { GroupID 
string `toml:"group_id"` @@ -37,7 +37,7 @@ func (i *Inlong) SetSerializer(serializer telegraf.Serializer) { } func (i *Inlong) Connect() error { - producer, err := i.producerFunc(i.GroupID, i.ManagerURL+ManagerURLSuffix) + producer, err := i.producerFunc(i.GroupID, i.ManagerURL+managerURLSuffix) if err != nil { return &internal.StartupError{Err: err, Retry: true} } @@ -71,12 +71,12 @@ func (i *Inlong) Write(metrics []telegraf.Metric) error { func init() { outputs.Add("inlong", func() telegraf.Output { return &Inlong{ - producerFunc: NewProducer, + producerFunc: newProducer, } }) } -func NewProducer(groupID, managerURL string) (dataproxy.Client, error) { +func newProducer(groupID, managerURL string) (dataproxy.Client, error) { producer, err := dataproxy.NewClient( dataproxy.WithGroupID(groupID), dataproxy.WithURL(managerURL), From 50194b537e324f49dea015c08ed09c5748ff6d23 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 10 Dec 2024 16:24:36 +0100 Subject: [PATCH 139/170] chore: Update link to release calendar (#16278) --- docs/FAQ.md | 2 +- docs/RELEASES.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/FAQ.md b/docs/FAQ.md index 044e68ab4545c..4016ebc97b9b4 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -15,7 +15,7 @@ new features are held for the next minor release. Users can view what [GitHub milestones][] a PR belongs to to determine the release it will go out with. -[Google Calendar]: https://calendar.google.com/calendar/embed?src=c_1ikq7u4f5c4o6mh9ep4duo3avk%40group.calendar.google.com +[Google Calendar]: https://calendar.google.com/calendar/embed?src=c_03d981cefd8d6432894cb162da5c6186e393bc0f970ca6c371201aa05d30d763%40group.calendar.google.com [GitHub milestones]: https://github.com/influxdata/telegraf/milestones ## How can I filter or select specific metrics? 
diff --git a/docs/RELEASES.md b/docs/RELEASES.md index 9b0b1a26ab7e5..233cb2acc0907 100644 --- a/docs/RELEASES.md +++ b/docs/RELEASES.md @@ -19,5 +19,5 @@ new features are held for the next minor release. Users can view what [GitHub milestones][] a PR belongs to when they want to determine the release it will go out with. -[Google Calendar]: https://calendar.google.com/calendar/embed?src=c_1ikq7u4f5c4o6mh9ep4duo3avk%40group.calendar.google.com +[Google Calendar]: https://calendar.google.com/calendar/embed?src=c_03d981cefd8d6432894cb162da5c6186e393bc0f970ca6c371201aa05d30d763%40group.calendar.google.com [GitHub milestones]: https://github.com/influxdata/telegraf/milestones From 2c828f25f92fa6efca4127d79f58341bc399b1ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:25:05 -0600 Subject: [PATCH 140/170] chore(deps): Bump github.com/aws/aws-sdk-go-v2/config from 1.27.39 to 1.28.6 (#16280) --- go.mod | 22 +++++++++++----------- go.sum | 44 ++++++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/go.mod b/go.mod index 66bab6f6210f7..4a44555a9d8b1 100644 --- a/go.mod +++ b/go.mod @@ -45,16 +45,16 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/awnumar/memguard v0.22.5 - github.com/aws/aws-sdk-go-v2 v1.32.5 - github.com/aws/aws-sdk-go-v2/config v1.27.39 - github.com/aws/aws-sdk-go-v2/credentials v1.17.44 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 + github.com/aws/aws-sdk-go-v2 v1.32.6 + github.com/aws/aws-sdk-go-v2/config v1.28.6 + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.43.1 github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.38.0 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.36.2 
github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1 github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.6 - github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4 github.com/aws/smithy-go v1.22.1 github.com/benbjohnson/clock v1.3.5 @@ -285,19 +285,19 @@ require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect 
github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect diff --git a/go.sum b/go.sum index b9102cd078595..c15dad2394858 100644 --- a/go.sum +++ b/go.sum @@ -874,34 +874,34 @@ github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAP github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.32.5 h1:U8vdWJuY7ruAkzaOdD7guwJjD06YSKmnKCJs7s3IkIo= -github.com/aws/aws-sdk-go-v2 v1.32.5/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= +github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= -github.com/aws/aws-sdk-go-v2/config v1.27.39 h1:FCylu78eTGzW1ynHcongXK9YHtoXD5AiiUqq3YfJYjU= -github.com/aws/aws-sdk-go-v2/config v1.27.39/go.mod h1:wczj2hbyskP4LjMKBEZwPRO1shXY+GsQleab+ZXT2ik= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod 
h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= -github.com/aws/aws-sdk-go-v2/credentials v1.17.44 h1:qqfs5kulLUHUEXlHEZXLJkgGoF3kkUeFUTVA585cFpU= -github.com/aws/aws-sdk-go-v2/credentials v1.17.44/go.mod h1:0Lm2YJ8etJdEdw23s+q/9wTpOeo2HhNE97XcRa7T8MA= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7 h1:FZB15YK2h/l2wO9YXvXr7/mZ5uOJIsLNZIePlHarAwg= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.7/go.mod h1:xTMr0gSUW6H6nJJVV257wWlk9257DwZ7EFhPFn3itgo= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 h1:woXadbf0c7enQ2UGCi8gW/WuKmE0xIzxBF/eD94jMKQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19/go.mod h1:zminj5ucw7w0r65bP6nhyOd3xL6veAUMc3ElGMoLVb4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayxSX32HDw73Yb6Hh2izDSFxXY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g= github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 
h1:4usbeaes3yJnCFC7kfeyhkdkPtoRYPa/hTmCqMpKpLI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24/go.mod h1:5CI1JemjVwde8m2WG3cz23qHKPOxbpkq0HaoreEgLIY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 h1:N1zsICrQglfzaBnrfM0Ys00860C+QFwu6u/5+LomP+o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24/go.mod h1:dCn9HbJ8+K31i8IQ8EWmWj0EiIk0+vKiHNMxTTYveAg= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= @@ -921,8 +921,8 @@ github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1/go.mod h1:ifHRXsCyL github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1 h1:2ZzpXgkh4qmsexltvLVIaC4+HdN3oe6OWK6Upc4Qz/0= github.com/aws/aws-sdk-go-v2/service/ec2 v1.162.1/go.mod h1:eu3DWRK5GBq4hjCr7nAbnQiHSan5RJ6ue3qQVp5PJs0= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 
h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE= @@ -930,8 +930,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.2 h1:1G7T github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.2/go.mod h1:+ybYGLXoF7bcD7wIcMcklxyABZQmuBf1cHUhvY6FGIo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 h1:tHxQi/XHPK0ctd/wdOw0t7Xrc2OxcRCnVzv8lwWPu0c= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4/go.mod h1:4GQbF1vJzG60poZqWatZlhP31y8PGCCVTvIGPdaaYJ0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= @@ -941,15 +941,15 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 h1:hT8ZAZRIfqBqHbzKTII+CIiY8G2oC github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3/go.mod 
h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 h1:HJwZwRt2Z2Tdec+m+fPjvdmkq2s9Ra+VR0hjF7V2o40= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.5/go.mod h1:wrMCEwjFPms+V86TCQQeOxQF/If4vT44FGIOFiMC2ck= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 h1:zcx9LiGWZ6i6pjdcoE9oXAB6mUdeyC36Ia/QEiIvYdg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4/go.mod h1:Tp/ly1cTjRLGBBmNccFumbZ8oqpZlpdhFf80SrRh4is= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 h1:yDxvkz3/uOKfxnv8YhzOi9m+2OGIxF+on3KOISbK5IU= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.4/go.mod h1:9XEUty5v5UAsMiFOBJrNibZgwCeOma73jgGwwhgffa8= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4 h1:glNNLfVzW88jz83oPZ4gXndJL7VDDANHowCoJU673OU= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.4/go.mod 
h1:VUHrcV1XoUd6ZWzIMal9CeAA2EiKkAhmImuRGhNbaxg= github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= From 59b27c448cc5437eff9a96d526a8200c161f3f27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:25:29 -0600 Subject: [PATCH 141/170] chore(deps): Bump cloud.google.com/go/monitoring from 1.21.1 to 1.22.0 (#16283) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4a44555a9d8b1..d165e6df044a2 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.23.0 require ( cloud.google.com/go/bigquery v1.64.0 - cloud.google.com/go/monitoring v1.21.1 + cloud.google.com/go/monitoring v1.22.0 cloud.google.com/go/pubsub v1.45.1 cloud.google.com/go/storage v1.47.0 collectd.org v0.6.0 diff --git a/go.sum b/go.sum index c15dad2394858..8ca7b75724514 100644 --- a/go.sum +++ b/go.sum @@ -387,8 +387,8 @@ cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhI cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= -cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= +cloud.google.com/go/monitoring v1.22.0 h1:mQ0040B7dpuRq1+4YiQD43M2vW9HgoVxY98xhqGT+YI= +cloud.google.com/go/monitoring v1.22.0/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod 
h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= From 414ff065e0a21c8fb1345e167b004a1cc12c0c9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:25:50 -0600 Subject: [PATCH 142/170] chore(deps): Bump github.com/nats-io/nats.go from 1.36.0 to 1.37.0 (#16282) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d165e6df044a2..653f4dc74cd65 100644 --- a/go.mod +++ b/go.mod @@ -146,7 +146,7 @@ require ( github.com/moby/ipvs v1.1.0 github.com/multiplay/go-ts3 v1.2.0 github.com/nats-io/nats-server/v2 v2.10.17 - github.com/nats-io/nats.go v1.36.0 + github.com/nats-io/nats.go v1.37.0 github.com/netsampler/goflow2/v2 v2.2.1 github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1 github.com/nsqio/go-nsq v1.1.0 diff --git a/go.sum b/go.sum index 8ca7b75724514..0144bc0aee861 100644 --- a/go.sum +++ b/go.sum @@ -1960,8 +1960,8 @@ github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c= github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A= github.com/nats-io/nats-server/v2 v2.10.17 h1:PTVObNBD3TZSNUDgzFb1qQsQX4mOgFmOuG9vhT+KBUY= github.com/nats-io/nats-server/v2 v2.10.17/go.mod h1:5OUyc4zg42s/p2i92zbbqXvUNsbF0ivdTLKshVMn2YQ= -github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU= -github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= +github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE= +github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= From 49a4d62b94b602ab582a87c1e64e9db1f2ce96b1 Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:26:14 -0600 Subject: [PATCH 143/170] chore(deps): Bump k8s.io/client-go from 0.30.1 to 0.31.3 (#16281) --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 653f4dc74cd65..92d3e61b88e52 100644 --- a/go.mod +++ b/go.mod @@ -231,9 +231,9 @@ require ( gopkg.in/olivere/elastic.v5 v5.0.86 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.30.1 - k8s.io/apimachinery v0.31.1 - k8s.io/client-go v0.30.1 + k8s.io/api v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/client-go v0.31.3 layeh.com/radius v0.0.0-20221205141417-e7fbddd11d68 modernc.org/sqlite v1.34.1 ) diff --git a/go.sum b/go.sum index 0144bc0aee861..75fee2cd2ac48 100644 --- a/go.sum +++ b/go.sum @@ -3403,12 +3403,12 @@ honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= -k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.3 
h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= From 1185c964dd411697605cd3fe469150f7d929bcda Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 10 Dec 2024 22:13:42 +0100 Subject: [PATCH 144/170] fix(outputs.remotefile): Handle tracking metrics correctly (#16289) --- plugins/outputs/remotefile/remotefile.go | 7 +- plugins/outputs/remotefile/remotefile_test.go | 128 ++++++++++++++++++ 2 files changed, 134 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/remotefile/remotefile.go b/plugins/outputs/remotefile/remotefile.go index 9db5c039368d4..a30badf10c6c6 100644 --- a/plugins/outputs/remotefile/remotefile.go +++ b/plugins/outputs/remotefile/remotefile.go @@ -177,7 +177,12 @@ func (f *File) Write(metrics []telegraf.Metric) error { // Group the metrics per output file groups := make(map[string][]telegraf.Metric) - for _, m := range metrics { + for _, raw := range metrics { + m := raw + if wm, ok := raw.(telegraf.UnwrappableMetric); ok { + m = wm.Unwrap() + } + for _, tmpl := range f.templates { buf.Reset() if err := tmpl.Execute(&buf, m); err != nil { diff --git a/plugins/outputs/remotefile/remotefile_test.go b/plugins/outputs/remotefile/remotefile_test.go index 70ba9a919167e..cdd5b51617967 100644 --- a/plugins/outputs/remotefile/remotefile_test.go +++ b/plugins/outputs/remotefile/remotefile_test.go @@ -5,6 +5,7 @@ import ( "os" "path/filepath" "strings" + "sync" "testing" "time" @@ -393,3 +394,130 @@ func TestForgettingFiles(t *testing.T) { require.Len(t, plugin.serializers, 1) require.Contains(t, plugin.serializers, "test-b.csv") } + +func TestTrackingMetrics(t *testing.T) { + // 
see issue #16045 + inputRaw := []telegraf.Metric{ + metric.New( + "test", + map[string]string{"source": "localhost"}, + map[string]interface{}{"value": 23}, + time.Unix(1719410465, 0), + ), + metric.New( + "test", + map[string]string{"source": "remotehost"}, + map[string]interface{}{"value": 21}, + time.Unix(1719410465, 0), + ), + metric.New( + "test", + map[string]string{"source": "localhost"}, + map[string]interface{}{"value": 42}, + time.Unix(1719410485, 0), + ), + metric.New( + "test", + map[string]string{"source": "remotehost"}, + map[string]interface{}{"value": 66}, + time.Unix(1719410485, 0), + ), + metric.New( + "test", + map[string]string{"source": "remotehost"}, + map[string]interface{}{"value": 55}, + time.Unix(1716310124, 0), + ), + metric.New( + "test", + map[string]string{"source": "remotehost"}, + map[string]interface{}{"value": 1}, + time.Unix(1716310174, 0), + ), + } + + // Create tracking metrics as inputs for the test + var mu sync.Mutex + delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw)) + notify := func(di telegraf.DeliveryInfo) { + mu.Lock() + defer mu.Unlock() + delivered = append(delivered, di) + } + input := make([]telegraf.Metric, 0, len(inputRaw)) + for _, m := range inputRaw { + tm, _ := metric.WithTracking(m, notify) + input = append(input, tm) + } + + // Create the expectations + expected := map[string][]string{ + "localhost-2024-06-26": { + "test,source=localhost value=23i 1719410465000000000\n", + "test,source=localhost value=42i 1719410485000000000\n", + }, + "remotehost-2024-06-26": { + "test,source=remotehost value=21i 1719410465000000000\n", + "test,source=remotehost value=66i 1719410485000000000\n", + }, + "remotehost-2024-05-21": { + "test,source=remotehost value=55i 1716310124000000000\n", + "test,source=remotehost value=1i 1716310174000000000\n", + }, + } + + // Prepare the output filesystem + tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + + // 
Setup the plugin including the serializer + plugin := &File{ + Remote: config.NewSecret([]byte("local:" + tmpdir)), + Files: []string{`{{.Tag "source"}}-{{.Time.Format "2006-01-02"}}`}, + WriteBackInterval: config.Duration(100 * time.Millisecond), + Log: &testutil.Logger{}, + } + + plugin.SetSerializerFunc(func() (telegraf.Serializer, error) { + serializer := &influx.Serializer{} + err := serializer.Init() + return serializer, err + }) + require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Connect()) + defer plugin.Close() + + // Write the metrics and wait for the data to settle to disk + require.NoError(t, plugin.Write(input)) + require.Eventually(t, func() bool { + ok := true + for fn := range expected { + _, err := os.Stat(filepath.Join(tmpdir, fn)) + ok = ok && err == nil + } + return ok + }, 5*time.Second, 100*time.Millisecond) + + // Check the result + for fn, lines := range expected { + tmpfn := filepath.Join(tmpdir, fn) + require.FileExists(t, tmpfn) + + actual, err := os.ReadFile(tmpfn) + require.NoError(t, err) + require.Equal(t, strings.Join(lines, ""), string(actual)) + } + + // Simulate output acknowledging delivery + for _, m := range input { + m.Accept() + } + + // Check delivery + require.Eventuallyf(t, func() bool { + mu.Lock() + defer mu.Unlock() + return len(input) == len(delivered) + }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) +} From c1ed77d5cba40b4de576f2709fb439791d841803 Mon Sep 17 00:00:00 2001 From: Dan Fuchs <330402+fajpunk@users.noreply.github.com> Date: Tue, 10 Dec 2024 15:14:58 -0600 Subject: [PATCH 145/170] feat(parsers.avro): Allow union fields to be specified as tags (#16272) --- plugins/parsers/avro/parser.go | 82 +++++++++++-------- .../testcases/union-nullable-tag/expected.out | 1 + .../testcases/union-nullable-tag/message.json | 14 ++++ .../union-nullable-tag/telegraf.conf | 27 ++++++ 4 files changed, 89 insertions(+), 35 deletions(-) create mode 100644 
plugins/parsers/avro/testcases/union-nullable-tag/expected.out create mode 100644 plugins/parsers/avro/testcases/union-nullable-tag/message.json create mode 100644 plugins/parsers/avro/testcases/union-nullable-tag/telegraf.conf diff --git a/plugins/parsers/avro/parser.go b/plugins/parsers/avro/parser.go index 1f9a911f8f61f..6735eeb3499df 100644 --- a/plugins/parsers/avro/parser.go +++ b/plugins/parsers/avro/parser.go @@ -180,6 +180,41 @@ func (p *Parser) flattenField(fldName string, fldVal map[string]interface{}) map return ret } +func (p *Parser) flattenItem(fld string, fldVal interface{}) (map[string]interface{}, error) { + sep := flatten.SeparatorStyle{ + Before: "", + Middle: p.FieldSeparator, + After: "", + } + candidate := make(map[string]interface{}) + candidate[fld] = fldVal + + var flat map[string]interface{} + var err error + // Exactly how we flatten is decided by p.UnionMode + if p.UnionMode == "flatten" { + flat, err = flatten.Flatten(candidate, "", sep) + if err != nil { + return nil, fmt.Errorf("flatten candidate %q failed: %w", candidate, err) + } + } else { + // "nullable" or "any" + typedVal, ok := candidate[fld].(map[string]interface{}) + if !ok { + // the "key" is not a string, so ... + // most likely an array? Do the default thing + // and flatten the candidate. + flat, err = flatten.Flatten(candidate, "", sep) + if err != nil { + return nil, fmt.Errorf("flatten candidate %q failed: %w", candidate, err) + } + } else { + flat = p.flattenField(fld, typedVal) + } + } + return flat, nil +} + func (p *Parser) createMetric(data map[string]interface{}, schema string) (telegraf.Metric, error) { // Tags differ from fields, in that tags are inherently strings. // fields can be of any type. @@ -193,12 +228,18 @@ func (p *Parser) createMetric(data map[string]interface{}, schema string) (teleg // Avro doesn't have a Tag/Field distinction, so we have to tell // Telegraf which items are our tags. 
for _, tag := range p.Tags { - sTag, err := internal.ToString(data[tag]) - if err != nil { - p.Log.Warnf("Could not convert %v to string for tag %q: %v", data[tag], tag, err) - continue + flat, flattenErr := p.flattenItem(tag, data[tag]) + if flattenErr != nil { + return nil, fmt.Errorf("flatten tag %q failed: %w", tag, flattenErr) + } + for k, v := range flat { + sTag, stringErr := internal.ToString(v) + if stringErr != nil { + p.Log.Warnf("Could not convert %v to string for tag %q: %v", data[tag], tag, stringErr) + continue + } + tags[k] = sTag } - tags[tag] = sTag } var fieldList []string if len(p.Fields) != 0 { @@ -215,37 +256,8 @@ func (p *Parser) createMetric(data map[string]interface{}, schema string) (teleg } // We need to flatten out our fields. The default (the separator // string is empty) is equivalent to what streamreactor does. - sep := flatten.SeparatorStyle{ - Before: "", - Middle: p.FieldSeparator, - After: "", - } for _, fld := range fieldList { - candidate := make(map[string]interface{}) - candidate[fld] = data[fld] // 1-item map - var flat map[string]interface{} - var err error - // Exactly how we flatten is decided by p.UnionMode - if p.UnionMode == "flatten" { - flat, err = flatten.Flatten(candidate, "", sep) - if err != nil { - return nil, fmt.Errorf("flatten candidate %q failed: %w", candidate, err) - } - } else { - // "nullable" or "any" - typedVal, ok := candidate[fld].(map[string]interface{}) - if !ok { - // the "key" is not a string, so ... - // most likely an array? Do the default thing - // and flatten the candidate. 
- flat, err = flatten.Flatten(candidate, "", sep) - if err != nil { - return nil, fmt.Errorf("flatten candidate %q failed: %w", candidate, err) - } - } else { - flat = p.flattenField(fld, typedVal) - } - } + flat, err := p.flattenItem(fld, data[fld]) if err != nil { return nil, fmt.Errorf("flatten field %q failed: %w", fld, err) } diff --git a/plugins/parsers/avro/testcases/union-nullable-tag/expected.out b/plugins/parsers/avro/testcases/union-nullable-tag/expected.out new file mode 100644 index 0000000000000..b4a55b5081166 --- /dev/null +++ b/plugins/parsers/avro/testcases/union-nullable-tag/expected.out @@ -0,0 +1 @@ +Switch,switch_wwn=10:00:50:EB:1A:0B:84:3A,some_union_in_a_tag=some_value statistics_collection_time=1682509200092i,up_time=1166984904i,memory_utilization=20.0 1682509200092000 diff --git a/plugins/parsers/avro/testcases/union-nullable-tag/message.json b/plugins/parsers/avro/testcases/union-nullable-tag/message.json new file mode 100644 index 0000000000000..413cb9a9f72b0 --- /dev/null +++ b/plugins/parsers/avro/testcases/union-nullable-tag/message.json @@ -0,0 +1,14 @@ +{ + "some_union_in_a_tag": { + "string": "some_value" + }, + "switch_wwn": "10:00:50:EB:1A:0B:84:3A", + "statistics_collection_time": 1682509200092, + "up_time": 1166984904, + "cpu_utilization": { + "null": null + }, + "memory_utilization": { + "float": 20.0 + } +} diff --git a/plugins/parsers/avro/testcases/union-nullable-tag/telegraf.conf b/plugins/parsers/avro/testcases/union-nullable-tag/telegraf.conf new file mode 100644 index 0000000000000..1e6b92bbc6ac7 --- /dev/null +++ b/plugins/parsers/avro/testcases/union-nullable-tag/telegraf.conf @@ -0,0 +1,27 @@ +[[ inputs.file ]] + files = ["./testcases/union-nullable-tag/message.json"] + data_format = "avro" + + avro_format = "json" + avro_measurement = "Switch" + avro_tags = ["switch_wwn", "some_union_in_a_tag"] + avro_fields = ["up_time", "cpu_utilization", "memory_utilization", "statistics_collection_time"] + avro_timestamp = 
"statistics_collection_time" + avro_timestamp_format = "unix_ms" + avro_union_mode = "nullable" + avro_schema = ''' + { + "namespace": "com.brocade.streaming", + "name": "fibrechannel_switch_statistics", + "type": "record", + "version": "1", + "fields": [ + {"name": "some_union_in_a_tag", "type": ["null", "string"], "default": null, "doc": "Some union that is used in a tag"}, + {"name": "switch_wwn", "type": "string", "doc": "WWN of the Physical Switch."}, + {"name": "statistics_collection_time", "type": "long", "doc": "Epoch time when statistics is collected."}, + {"name": "up_time", "type": "long", "doc": "Switch Up Time (in hundredths of a second)"}, + {"name": "cpu_utilization", "type": ["null","float"], "default": null, "doc": "CPU Utilization in %"}, + {"name": "memory_utilization", "type": ["null", "float"], "default": null, "doc": "Memory Utilization in %"} + ] + } + ''' From 86e9a1ee68032438627e6bc2b9b298160d2f0d84 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 11 Dec 2024 21:25:03 +0100 Subject: [PATCH 146/170] chore(inputs.kinesis_consumer): Cleanup code (#16267) Co-authored-by: Dane Strandboge <136023093+DStrand1@users.noreply.github.com> --- plugins/inputs/kinesis_consumer/encoding.go | 45 +++ .../kinesis_consumer/kinesis_consumer.go | 250 ++++++---------- .../kinesis_consumer/kinesis_consumer_test.go | 277 +++++++----------- plugins/inputs/kinesis_consumer/logging.go | 27 ++ plugins/inputs/kinesis_consumer/noop_store.go | 7 + 5 files changed, 274 insertions(+), 332 deletions(-) create mode 100644 plugins/inputs/kinesis_consumer/encoding.go create mode 100644 plugins/inputs/kinesis_consumer/logging.go create mode 100644 plugins/inputs/kinesis_consumer/noop_store.go diff --git a/plugins/inputs/kinesis_consumer/encoding.go b/plugins/inputs/kinesis_consumer/encoding.go new file mode 100644 index 0000000000000..d2bad6fd8301d --- /dev/null +++ b/plugins/inputs/kinesis_consumer/encoding.go @@ -0,0 +1,45 @@ 
+package kinesis_consumer + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "fmt" + "io" +) + +type decodingFunc func([]byte) ([]byte, error) + +func processGzip(data []byte) ([]byte, error) { + zipData, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer zipData.Close() + return io.ReadAll(zipData) +} + +func processZlib(data []byte) ([]byte, error) { + zlibData, err := zlib.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer zlibData.Close() + return io.ReadAll(zlibData) +} + +func processNoOp(data []byte) ([]byte, error) { + return data, nil +} + +func getDecodingFunc(encoding string) (decodingFunc, error) { + switch encoding { + case "gzip": + return processGzip, nil + case "zlib": + return processZlib, nil + case "none", "identity", "": + return processNoOp, nil + } + return nil, fmt.Errorf("unknown content encoding %q", encoding) +} diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 819a36b0a33da..4c65aadef41fa 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -2,23 +2,15 @@ package kinesis_consumer import ( - "bytes" - "compress/gzip" - "compress/zlib" "context" _ "embed" "errors" - "fmt" - "io" - "math/big" - "strings" "sync" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dynamodb" "github.com/aws/aws-sdk-go-v2/service/kinesis" - "github.com/aws/smithy-go/logging" consumer "github.com/harlow/kinesis-consumer" "github.com/harlow/kinesis-consumer/store/ddb" @@ -31,86 +23,85 @@ import ( //go:embed sample.conf var sampleConfig string -var ( - once sync.Once - // this is the largest sequence number allowed - https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SequenceNumberRange.html - maxSeq = strToBint(strings.Repeat("9", 129)) - negOne *big.Int -) - -const ( - defaultMaxUndeliveredMessages = 
1000 -) - -type ( - KinesisConsumer struct { - StreamName string `toml:"streamname"` - ShardIteratorType string `toml:"shard_iterator_type"` - DynamoDB *dynamoDB `toml:"checkpoint_dynamodb"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - ContentEncoding string `toml:"content_encoding"` - - Log telegraf.Logger `toml:"-"` - - cons *consumer.Consumer - parser telegraf.Parser - cancel context.CancelFunc - acc telegraf.TrackingAccumulator - sem chan struct{} +var once sync.Once + +type KinesisConsumer struct { + StreamName string `toml:"streamname"` + ShardIteratorType string `toml:"shard_iterator_type"` + DynamoDB *dynamoDB `toml:"checkpoint_dynamodb"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + ContentEncoding string `toml:"content_encoding"` + Log telegraf.Logger `toml:"-"` + common_aws.CredentialConfig + + cons *consumer.Consumer + parser telegraf.Parser + cancel context.CancelFunc + acc telegraf.TrackingAccumulator + sem chan struct{} + + checkpoint consumer.Store + checkpoints map[string]checkpoint + records map[telegraf.TrackingID]string + checkpointTex sync.Mutex + recordsTex sync.Mutex + wg sync.WaitGroup + + contentDecodingFunc decodingFunc + + lastSeqNum string +} - checkpoint consumer.Store - checkpoints map[string]checkpoint - records map[telegraf.TrackingID]string - checkpointTex sync.Mutex - recordsTex sync.Mutex - wg sync.WaitGroup +type dynamoDB struct { + AppName string `toml:"app_name"` + TableName string `toml:"table_name"` +} - processContentEncodingFunc processContent +type checkpoint struct { + streamName string + shardID string +} - lastSeqNum *big.Int +func (*KinesisConsumer) SampleConfig() string { + return sampleConfig +} - common_aws.CredentialConfig +func (k *KinesisConsumer) Init() error { + // Set defaults + if k.MaxUndeliveredMessages < 1 { + k.MaxUndeliveredMessages = 1000 } - dynamoDB struct { - AppName string `toml:"app_name"` - TableName string `toml:"table_name"` + if k.ShardIteratorType == "" { 
+ k.ShardIteratorType = "TRIM_HORIZON" } - - checkpoint struct { - streamName string - shardID string + if k.ContentEncoding == "" { + k.ContentEncoding = "identity" } -) -type processContent func([]byte) ([]byte, error) - -func (*KinesisConsumer) SampleConfig() string { - return sampleConfig -} + f, err := getDecodingFunc(k.ContentEncoding) + if err != nil { + return err + } + k.contentDecodingFunc = f -func (k *KinesisConsumer) Init() error { - return k.configureProcessContentEncodingFunc() + return nil } func (k *KinesisConsumer) SetParser(parser telegraf.Parser) { k.parser = parser } -func (k *KinesisConsumer) Start(ac telegraf.Accumulator) error { - err := k.connect(ac) - if err != nil { - return err - } - - return nil +func (k *KinesisConsumer) Start(acc telegraf.Accumulator) error { + return k.connect(acc) } func (k *KinesisConsumer) Gather(acc telegraf.Accumulator) error { if k.cons == nil { return k.connect(acc) } - k.lastSeqNum = maxSeq + // Enforce writing of last received sequence number + k.lastSeqNum = "" return nil } @@ -138,7 +129,7 @@ func (k *KinesisConsumer) SetCheckpoint(streamName, shardID, sequenceNumber stri return nil } -func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { +func (k *KinesisConsumer) connect(acc telegraf.Accumulator) error { cfg, err := k.CredentialConfig.Credentials() if err != nil { return err @@ -180,7 +171,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { k.cons = cons - k.acc = ac.WithTracking(k.MaxUndeliveredMessages) + k.acc = acc.WithTracking(k.MaxUndeliveredMessages) k.records = make(map[telegraf.TrackingID]string, k.MaxUndeliveredMessages) k.checkpoints = make(map[string]checkpoint, k.MaxUndeliveredMessages) k.sem = make(chan struct{}, k.MaxUndeliveredMessages) @@ -204,8 +195,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { case k.sem <- struct{}{}: break } - err := k.onMessage(k.acc, r) - if err != nil { + if err := k.onMessage(k.acc, r); err != nil { 
<-k.sem k.Log.Errorf("Scan parser error: %v", err) } @@ -223,7 +213,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { } func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, r *consumer.Record) error { - data, err := k.processContentEncodingFunc(r.Data) + data, err := k.contentDecodingFunc(r.Data) if err != nil { return err } @@ -262,111 +252,37 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) { delete(k.records, info.ID()) k.recordsTex.Unlock() - if info.Delivered() { - k.checkpointTex.Lock() - chk, ok := k.checkpoints[sequenceNum] - if !ok { - k.checkpointTex.Unlock() - continue - } - delete(k.checkpoints, sequenceNum) - k.checkpointTex.Unlock() - - // at least once - if strToBint(sequenceNum).Cmp(k.lastSeqNum) > 0 { - continue - } - - k.lastSeqNum = strToBint(sequenceNum) - if err := k.checkpoint.SetCheckpoint(chk.streamName, chk.shardID, sequenceNum); err != nil { - k.Log.Debugf("Setting checkpoint failed: %v", err) - } - } else { + if !info.Delivered() { k.Log.Debug("Metric group failed to process") + continue } - } - } -} -func processGzip(data []byte) ([]byte, error) { - zipData, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } - defer zipData.Close() - return io.ReadAll(zipData) -} - -func processZlib(data []byte) ([]byte, error) { - zlibData, err := zlib.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } - defer zlibData.Close() - return io.ReadAll(zlibData) -} - -func processNoOp(data []byte) ([]byte, error) { - return data, nil -} - -func strToBint(s string) *big.Int { - n, ok := new(big.Int).SetString(s, 10) - if !ok { - return negOne - } - return n -} - -func (k *KinesisConsumer) configureProcessContentEncodingFunc() error { - switch k.ContentEncoding { - case "gzip": - k.processContentEncodingFunc = processGzip - case "zlib": - k.processContentEncodingFunc = processZlib - case "none", "identity", "": - k.processContentEncodingFunc = processNoOp 
- default: - return fmt.Errorf("unknown content encoding %q", k.ContentEncoding) - } - return nil -} - -type telegrafLoggerWrapper struct { - telegraf.Logger -} + if k.lastSeqNum != "" { + continue + } -func (t *telegrafLoggerWrapper) Log(args ...interface{}) { - t.Trace(args...) -} + // Store the sequence number at least once per gather cycle using the checkpoint + // storage (usually DynamoDB). + k.checkpointTex.Lock() + chk, ok := k.checkpoints[sequenceNum] + if !ok { + k.checkpointTex.Unlock() + continue + } + delete(k.checkpoints, sequenceNum) + k.checkpointTex.Unlock() -func (t *telegrafLoggerWrapper) Logf(classification logging.Classification, format string, v ...interface{}) { - switch classification { - case logging.Debug: - format = "DEBUG " + format - case logging.Warn: - format = "WARN" + format - default: - format = "INFO " + format + k.Log.Tracef("persisting sequence number %q for stream %q and shard %q", sequenceNum) + k.lastSeqNum = sequenceNum + if err := k.checkpoint.SetCheckpoint(chk.streamName, chk.shardID, sequenceNum); err != nil { + k.Log.Errorf("Setting checkpoint failed: %v", err) + } + } } - t.Logger.Tracef(format, v...) 
} -// noopStore implements the storage interface with discard -type noopStore struct{} - -func (n noopStore) SetCheckpoint(_, _, _ string) error { return nil } -func (n noopStore) GetCheckpoint(_, _ string) (string, error) { return "", nil } - func init() { - negOne, _ = new(big.Int).SetString("-1", 10) - inputs.Add("kinesis_consumer", func() telegraf.Input { - return &KinesisConsumer{ - ShardIteratorType: "TRIM_HORIZON", - MaxUndeliveredMessages: defaultMaxUndeliveredMessages, - lastSeqNum: maxSeq, - ContentEncoding: "identity", - } + return &KinesisConsumer{} }) } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go index e09e0df3717a6..b48372571b879 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go @@ -14,220 +14,167 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func TestKinesisConsumer_onMessage(t *testing.T) { +func TestInvalidCoding(t *testing.T) { + plugin := &KinesisConsumer{ + ContentEncoding: "notsupported", + } + require.ErrorContains(t, plugin.Init(), "unknown content encoding") +} + +func TestOnMessage(t *testing.T) { + // Prepare messages zlibBytpes, err := base64.StdEncoding.DecodeString( "eF5FjlFrgzAUhf9KuM+2aNB2zdsQ2xe3whQGW8qIeqdhaiSJK0P874u1Y4+Hc/jON0GHxoga858BgUF8fs5fzunHU5Jlj6cEPFDXHvXStGqsrsKWTapq44pW1SetxsF1a8qsRtGt0Yy" + "FKbUcrFT9UbYWtQH2frntkm/s7RInkNU6t9JpWNE5WBAFPo3CcHeg+9D703OziUOhCg6MQ/yakrspuZsyEjdYfsm+Jg2K1jZEfZLKQWUvFglylBobZXDLwSP8//EGpD4NNj7dUJpT6" + "hQY3W33h/AhCt84zDBf5l/MDl08", ) require.NoError(t, err) + gzippedBytes, err := base64.StdEncoding.DecodeString( "H4sIAAFXNGAAA0WOUWuDMBSF/0q4z7Zo0HbN2xDbF7fCFAZbyoh6p2FqJIkrQ/zvi7Vjj4dz+M43QYfGiBrznwGBQXx+zl/O6cdTkmWPpwQ8UNce9dK0aqyuwpZNqmrjilbVJ63GwXVr" + "yqxG0a3RjIUptRysVP1Rtha1AfZ+ue2Sb+ztEieQ1Tq30mlY0TlYEAU+jcJwd6D70PvTc7OJQ6EKDoxD/JqSuym5mzISN1h+yb4mDYrWNkR9kspBZS8WCXKUGhtlcMvBI/z/8QakPg02" + 
"Pt1QmlPqFBjdbfeH8CEK3zjMMF/mX0TaxZUpAQAA", ) require.NoError(t, err) - notZippedBytes := []byte(`{ - "messageType": "CONTROL_MESSAGE", - "owner": "CloudwatchLogs", - "logGroup": "", - "logStream": "", - "subscriptionFilters": [], - "logEvents": [ - { - "id": "", - "timestamp": 1510254469274, - "message": "{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}," - }, - { - "id": "", - "timestamp": 1510254469274, - "message": "{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}" - } - ] -}`) - parser := &json.Parser{ - MetricName: "json_test", - Query: "logEvents", - StringFields: []string{"message"}, - } - require.NoError(t, parser.Init()) - type fields struct { - ContentEncoding string - parser telegraf.Parser - records map[telegraf.TrackingID]string - } - type args struct { - r *consumer.Record - } - type expected struct { - numberOfMetrics int - messageContains string + notZippedBytes := []byte(` + { + "messageType": "CONTROL_MESSAGE", + "owner": "CloudwatchLogs", + "logGroup": "", + "logStream": "", + "subscriptionFilters": [], + "logEvents": [ + { + "id": "", + "timestamp": 1510254469274, + "message": "{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}," + }, + { + "id": "", + "timestamp": 1510254469274, + "message": "{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}" + } + ] } + `) + tests := []struct { - name string - fields fields - args args - wantErr bool - expected expected + name string + encoding string + records map[telegraf.TrackingID]string + args *consumer.Record + expectedNumber int + expectedContent string }{ { - name: "test no compression", - fields: fields{ - ContentEncoding: "none", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: 
args{ - r: &consumer.Record{ - Record: types.Record{ - Data: notZippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test no compression", + encoding: "none", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 2, - }, + expectedNumber: 2, + expectedContent: "bob", }, { - name: "test no compression via empty string for ContentEncoding", - fields: fields{ - ContentEncoding: "", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: notZippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test no compression via empty string for ContentEncoding", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 2, - }, + expectedNumber: 2, + expectedContent: "bob", }, { - name: "test no compression via identity ContentEncoding", - fields: fields{ - ContentEncoding: "identity", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: notZippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test no compression via identity ContentEncoding", + encoding: "identity", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 2, - }, + expectedNumber: 2, + expectedContent: "bob", }, { - name: "test no compression via no ContentEncoding", - fields: fields{ - parser: parser, - 
records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: notZippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test no compression via no ContentEncoding", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 2, - }, + expectedNumber: 2, + expectedContent: "bob", }, { - name: "test gzip compression", - fields: fields{ - ContentEncoding: "gzip", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: gzippedBytes, - SequenceNumber: aws.String("anything"), - }, + name: "test gzip compression", + encoding: "gzip", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: gzippedBytes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 1, - }, + expectedNumber: 1, + expectedContent: "bob", }, { - name: "test zlib compression", - fields: fields{ - ContentEncoding: "zlib", - parser: parser, - records: make(map[telegraf.TrackingID]string), - }, - args: args{ - r: &consumer.Record{ - Record: types.Record{ - Data: zlibBytpes, - SequenceNumber: aws.String("anything"), - }, + name: "test zlib compression", + encoding: "zlib", + records: make(map[telegraf.TrackingID]string), + args: &consumer.Record{ + Record: types.Record{ + Data: zlibBytpes, + SequenceNumber: aws.String("anything"), }, }, - wantErr: false, - expected: expected{ - messageContains: "bob", - numberOfMetrics: 1, - }, + expectedNumber: 1, + expectedContent: "bob", }, } - k := &KinesisConsumer{ - ContentEncoding: "notsupported", - } - err = k.Init() - require.Error(t, err) - for _, tt := range tests { t.Run(tt.name, func(t 
*testing.T) { - k := &KinesisConsumer{ - ContentEncoding: tt.fields.ContentEncoding, - parser: tt.fields.parser, - records: tt.fields.records, + // Prepare JSON parser + parser := &json.Parser{ + MetricName: "json_test", + Query: "logEvents", + StringFields: []string{"message"}, } - err := k.Init() - require.NoError(t, err) + require.NoError(t, parser.Init()) - acc := testutil.Accumulator{} - if err := k.onMessage(acc.WithTracking(tt.expected.numberOfMetrics), tt.args.r); (err != nil) != tt.wantErr { - t.Errorf("onMessage() error = %v, wantErr %v", err, tt.wantErr) + // Setup plugin + plugin := &KinesisConsumer{ + ContentEncoding: tt.encoding, + parser: parser, + records: tt.records, } + require.NoError(t, plugin.Init()) + + var acc testutil.Accumulator + require.NoError(t, plugin.onMessage(acc.WithTracking(tt.expectedNumber), tt.args)) - require.Len(t, acc.Metrics, tt.expected.numberOfMetrics) + actual := acc.GetTelegrafMetrics() + require.Len(t, actual, tt.expectedNumber) - for _, metric := range acc.Metrics { - if logEventMessage, ok := metric.Fields["message"]; ok { - require.Contains(t, logEventMessage.(string), tt.expected.messageContains) - } else { - t.Errorf("Expect logEvents to be present") - } + for _, metric := range actual { + raw, found := metric.GetField("message") + require.True(t, found, "no message present") + message, ok := raw.(string) + require.Truef(t, ok, "message not a string but %T", raw) + require.Contains(t, message, tt.expectedContent) } }) } diff --git a/plugins/inputs/kinesis_consumer/logging.go b/plugins/inputs/kinesis_consumer/logging.go new file mode 100644 index 0000000000000..82e9458654ea4 --- /dev/null +++ b/plugins/inputs/kinesis_consumer/logging.go @@ -0,0 +1,27 @@ +package kinesis_consumer + +import ( + "github.com/aws/smithy-go/logging" + + "github.com/influxdata/telegraf" +) + +type telegrafLoggerWrapper struct { + telegraf.Logger +} + +func (t *telegrafLoggerWrapper) Log(args ...interface{}) { + t.Trace(args...) 
+}
+
+func (t *telegrafLoggerWrapper) Logf(classification logging.Classification, format string, v ...interface{}) {
+	switch classification {
+	case logging.Debug:
+		format = "DEBUG " + format
+	case logging.Warn:
+		format = "WARN " + format
+	default:
+		format = "INFO " + format
+	}
+	t.Logger.Tracef(format, v...)
+}
diff --git a/plugins/inputs/kinesis_consumer/noop_store.go b/plugins/inputs/kinesis_consumer/noop_store.go
new file mode 100644
index 0000000000000..f400fdc718b9f
--- /dev/null
+++ b/plugins/inputs/kinesis_consumer/noop_store.go
@@ -0,0 +1,7 @@
+package kinesis_consumer
+
+// noopStore implements the storage interface with discard
+type noopStore struct{}
+
+func (noopStore) SetCheckpoint(_, _, _ string) error  { return nil }
+func (noopStore) GetCheckpoint(_, _ string) (string, error) { return "", nil }

From 7a3eab9d6dbc260725e8eeefe7c9a67f2e6b4fce Mon Sep 17 00:00:00 2001
From: Landon Clipp <11232769+LandonTClipp@users.noreply.github.com>
Date: Wed, 11 Dec 2024 14:31:28 -0600
Subject: [PATCH 147/170] docs(specs): Add `probe` as value to
 `startup_error_behavior` (#16052)

---
 docs/specs/tsd-006-startup-error-behavior.md | 13 ++++
 docs/specs/tsd-009-probe-on-startup.md       | 68 ++++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 docs/specs/tsd-009-probe-on-startup.md

diff --git a/docs/specs/tsd-006-startup-error-behavior.md b/docs/specs/tsd-006-startup-error-behavior.md
index 33fd39d8b16c1..4ae8549546828 100644
--- a/docs/specs/tsd-006-startup-error-behavior.md
+++ b/docs/specs/tsd-006-startup-error-behavior.md
@@ -75,6 +75,19 @@ must *not* fail on startup errors and should continue running. On startup error,
 Telegraf must ignore the plugin as-if it was not configured at all, i.e. the
 plugin must be completely removed from processing.
 
+### `probe` behavior
+
+When using the `probe` setting for the `startup_error_behavior` option Telegraf
+must *not* fail on startup errors and should continue running. 
On startup error, +Telegraf must ignore the plugin as-if it was not configured at all, i.e. the +plugin must be completely removed from processing, similar to the `ignore` +behavior. Additionally, Telegraf must probe the plugin (as defined in +[TSD-009][tsd_009]) after startup, if it implements the `ProbePlugin` interface. +If probing is available *and* returns an error Telegraf must *ignore* the +plugin as-if it was not configured at all. + +[tsd_009]: /docs/specs/tsd-009-probe-on-startup.md + ## Plugin Requirements Plugins participating in handling startup errors must implement the `Start()` diff --git a/docs/specs/tsd-009-probe-on-startup.md b/docs/specs/tsd-009-probe-on-startup.md new file mode 100644 index 0000000000000..99eec04178b43 --- /dev/null +++ b/docs/specs/tsd-009-probe-on-startup.md @@ -0,0 +1,68 @@ +# Probing plugins after startup + +## Objective + +Allow Telegraf to probe plugins during startup to enable enhanced plugin error +detection like availability of hardware or services + +## Keywords + +inputs, outputs, startup, probe, error, ignore, behavior + +## Overview + +When plugins are first instantiated, Telegraf will call the plugin's `Start()` +method (for inputs) or `Connect()` (for outputs) which will initialize its +configuration based off of config options and the running environment. It is +sometimes the case that while the initialization step succeeds, the upstream +service in which the plugin relies on is not actually running, or is not capable +of being communicated with due to incorrect configuration or environmental +problems. In situations like this, Telegraf does not detect that the plugin's +upstream service is not functioning properly, and thus it will continually call +the plugin during each `Gather()` iteration. This often has the effect of +polluting journald and system logs with voluminous error messages, which creates +issues for system administrators who rely on such logs to identify other +unrelated system problems. 
+ +More background discussion on this option, including other possible avenues, can +be viewed [here](https://github.com/influxdata/telegraf/issues/16028). + +## Probing + +Probing is an action whereby the plugin should ensure that the plugin will be +fully functional on a best effort basis. This may comprise communicating with +its external service, trying to access required devices, entities or executables +etc to ensure that the plugin will not produce errors during e.g. data collection +or data output. Probing must *not* produce, process or output any metrics. + +Plugins that support probing must implement the `ProbePlugin` interface. Such +plugins must behave in the following manner: + +1. Return an error if the external dependencies (hardware, services, +executables, etc.) of the plugin are not available. +2. Return an error if information cannot be gathered (in the case of inputs) or +sent (in the case of outputs) due to unrecoverable issues. For example, invalid +authentication, missing permissions, or non-existent endpoints. +3. Otherwise, return `nil` indicating the plugin will be fully functional. + +## Plugin Requirements + +Plugins that allow probing must implement the `ProbePlugin` interface. The +exact implementation depends on the plugin's functionality and requirements, +but generally it should take the same actions as it would during normal operation +e.g. calling `Gather()` or `Write()` and check if errors occur. If probing fails, +it must be safe to call the plugin's `Close()` method. + +Input plugins must *not* produce metrics, output plugins must *not* send any +metrics to the service. Plugins must *not* influence the later data processing or +collection by modifying the internal state of the plugin or the external state of the +service or hardware. For example, file-offsets or other service states must be +reset to not lose data during the first gather or write cycle. + +Plugins must return `nil` upon successful probing or an error otherwise. 
+ +## Related Issues + +- [#16028](https://github.com/influxdata/telegraf/issues/16028) +- [#15916](https://github.com/influxdata/telegraf/pull/15916) +- [#16001](https://github.com/influxdata/telegraf/pull/16001) From d9540e25619e8842c9be4d50ca55f7ebc4abfd61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 11 Dec 2024 21:32:16 +0100 Subject: [PATCH 148/170] test(linters): Enable `testifylint`: `contains`, `encoded-compare` and `regexp` (#16262) --- .golangci.yml | 3 ++ plugins/serializers/json/json_test.go | 10 +++---- .../serializers/nowmetric/nowmetric_test.go | 30 ++++++++----------- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a7eab4390f758..6c70d0cbc176a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -335,7 +335,9 @@ linters-settings: - blank-import - bool-compare - compares + - contains - empty + - encoded-compare - error-is-as - error-nil - expected-actual @@ -345,6 +347,7 @@ linters-settings: - len - negative-positive - nil-compare + - regexp - require-error - suite-broken-parallel - suite-dont-use-pkg diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index d562ca79f59d4..8c1b94aa655d7 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -196,10 +196,10 @@ func TestSerializeBatch(t *testing.T) { require.NoError(t, s.Init()) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - require.Equal( + require.JSONEq( t, - []byte(`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`+"\n"), - buf, + `{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`, + string(buf), ) } @@ -220,7 +220,7 @@ func TestSerializeBatchSkipInf(t *testing.T) { require.NoError(t, s.Init()) buf, err := s.SerializeBatch(metrics) require.NoError(t, 
err) - require.Equal(t, []byte(`{"metrics":[{"fields":{"time_idle":42},"name":"cpu","tags":{},"timestamp":0}]}`+"\n"), buf) + require.JSONEq(t, `{"metrics":[{"fields":{"time_idle":42},"name":"cpu","tags":{},"timestamp":0}]}`, string(buf)) } func TestSerializeBatchSkipInfAllFields(t *testing.T) { @@ -239,7 +239,7 @@ func TestSerializeBatchSkipInfAllFields(t *testing.T) { require.NoError(t, s.Init()) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - require.Equal(t, []byte(`{"metrics":[{"fields":{},"name":"cpu","tags":{},"timestamp":0}]}`+"\n"), buf) + require.JSONEq(t, `{"metrics":[{"fields":{},"name":"cpu","tags":{},"timestamp":0}]}`, string(buf)) } func TestSerializeTransformationNonBatch(t *testing.T) { diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go index 167963d461466..e0b2de3cce3a1 100644 --- a/plugins/serializers/nowmetric/nowmetric_test.go +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -191,13 +191,11 @@ func TestSerializeBatch(t *testing.T) { s := &Serializer{} buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - require.Equal( + require.JSONEq( t, - []byte( - `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},`+ - `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]`, - ), - buf, + `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},`+ + `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]`, + string(buf), ) } @@ -213,10 +211,10 @@ func TestSerializeJSONv2Format(t *testing.T) { s := &Serializer{Format: "jsonv2"} buf, err := s.Serialize(m) require.NoError(t, err) - require.Equal( + require.JSONEq( t, - 
[]byte(`{"records":[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]}`), - buf, + `{"records":[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]}`, + string(buf), ) } @@ -233,15 +231,13 @@ func TestSerializeJSONv2FormatBatch(t *testing.T) { metrics := []telegraf.Metric{m, m} buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - require.Equal( + require.JSONEq( t, - []byte( - `{"records":[`+ - `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},`+ - `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}`+ - `]}`, - ), - buf, + `{"records":[`+ + `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},`+ + `{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}`+ + `]}`, + string(buf), ) } From f01e7d3a4445f03a89ee7affad6a4e7f7640a9bf Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 11 Dec 2024 22:32:52 +0100 Subject: [PATCH 149/170] feat(inputs.systemd_units): Add active_enter_timestamp_us field (#16287) --- go.mod | 2 +- plugins/inputs/systemd_units/README.md | 1 + .../systemd_units/systemd_units_linux.go | 44 +++-- .../systemd_units/systemd_units_test.go | 158 +++++++++--------- 4 files changed, 113 insertions(+), 92 deletions(-) diff --git a/go.mod b/go.mod index 92d3e61b88e52..c0b8ec2f444e6 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,6 @@ require ( github.com/go-sql-driver/mysql v1.8.1 github.com/go-stomp/stomp v2.1.4+incompatible github.com/gobwas/glob v0.2.3 - github.com/godbus/dbus/v5 v5.1.0 github.com/gofrs/uuid/v5 v5.3.0 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec @@ -351,6 +350,7 @@ require 
( github.com/goburrow/serial v0.1.1-0.20211022031912-bfb69110f8dd // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.1 // indirect diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md index 5364f43ef574c..679d5819e9691 100644 --- a/plugins/inputs/systemd_units/README.md +++ b/plugins/inputs/systemd_units/README.md @@ -94,6 +94,7 @@ The following *additional* metrics are available with `details = true`: - swap_current (uint, current swap usage) - swap_peak (uint, peak swap usage) - mem_avail (uint, available memory for this unit) + - active_enter_timestamp_us (uint, timestamp in us when entered the state) ### Load diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index 443095ee203d8..2500b6cce62be 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -123,17 +123,18 @@ type client interface { ListUnitFilesByPatternsContext(ctx context.Context, states, pattern []string) ([]dbus.UnitFile, error) ListUnitsByNamesContext(ctx context.Context, units []string) ([]dbus.UnitStatus, error) GetUnitTypePropertiesContext(ctx context.Context, unit, unitType string) (map[string]interface{}, error) - GetUnitPropertyContext(ctx context.Context, unit, propertyName string) (*dbus.Property, error) + GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) ListUnitsContext(ctx context.Context) ([]dbus.UnitStatus, error) } type archParams struct { - client client - pattern []string - filter filter.Filter - unitTypeDBus string - scope string - user string + client client + pattern []string + filter filter.Filter + unitTypeDBus 
string + scope string + user string + warnUnitProps map[string]bool } func (s *SystemdUnits) Init() error { @@ -176,6 +177,8 @@ func (s *SystemdUnits) Init() error { return fmt.Errorf("invalid 'scope' %q", s.Scope) } + s.warnUnitProps = make(map[string]bool) + return nil } @@ -374,26 +377,35 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { } // Get required unit file properties - var unitFileState string - if v, err := s.client.GetUnitPropertyContext(ctx, state.Name, "UnitFileState"); err == nil { - unitFileState = strings.Trim(v.Value.String(), `'"`) + unitProperties, err := s.client.GetUnitPropertiesContext(ctx, state.Name) + if err != nil && !s.warnUnitProps[state.Name] { + s.Log.Warnf("Cannot read unit properties for %q: %v", state.Name, err) + s.warnUnitProps[state.Name] = true + } + + // Set tags + if v, found := unitProperties["UnitFileState"]; found { + tags["state"] = v.(string) } - var unitFilePreset string - if v, err := s.client.GetUnitPropertyContext(ctx, state.Name, "UnitFilePreset"); err == nil { - unitFilePreset = strings.Trim(v.Value.String(), `'"`) + if v, found := unitProperties["UnitFilePreset"]; found { + tags["preset"] = v.(string) } - tags["state"] = unitFileState - tags["preset"] = unitFilePreset + // Set fields + if v, found := unitProperties["ActiveEnterTimestamp"]; found { + fields["active_enter_timestamp_us"] = v + } fields["status_errno"] = properties["StatusErrno"] fields["restarts"] = properties["NRestarts"] fields["pid"] = properties["MainPID"] + fields["mem_current"] = properties["MemoryCurrent"] fields["mem_peak"] = properties["MemoryPeak"] + fields["mem_avail"] = properties["MemoryAvailable"] + fields["swap_current"] = properties["MemorySwapCurrent"] fields["swap_peak"] = properties["MemorySwapPeak"] - fields["mem_avail"] = properties["MemoryAvailable"] // Sanitize unset memory fields for k, value := range fields { diff --git a/plugins/inputs/systemd_units/systemd_units_test.go 
b/plugins/inputs/systemd_units/systemd_units_test.go index 7add99775d661..3c2c711110ac0 100644 --- a/plugins/inputs/systemd_units/systemd_units_test.go +++ b/plugins/inputs/systemd_units/systemd_units_test.go @@ -4,7 +4,6 @@ package systemd_units import ( "context" - "errors" "fmt" "math" "os/user" @@ -13,7 +12,6 @@ import ( "time" sdbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -25,12 +23,13 @@ import ( ) type properties struct { - uf *sdbus.UnitFile - utype string - state *sdbus.UnitStatus - ufPreset string - ufState string - properties map[string]interface{} + uf *sdbus.UnitFile + utype string + state *sdbus.UnitStatus + ufPreset string + ufState string + ufActiveEnter uint64 + properties map[string]interface{} } func TestDefaultPattern(t *testing.T) { @@ -284,6 +283,7 @@ func TestListFiles(t *testing.T) { } func TestShow(t *testing.T) { + enter := time.Now().UnixMicro() tests := []struct { name string properties map[string]properties @@ -301,8 +301,9 @@ func TestShow(t *testing.T) { ActiveState: "active", SubState: "running", }, - ufPreset: "disabled", - ufState: "enabled", + ufPreset: "disabled", + ufState: "enabled", + ufActiveEnter: uint64(enter), properties: map[string]interface{}{ "Id": "example.service", "StatusErrno": 0, @@ -328,17 +329,18 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 0, - "active_code": 0, - "sub_code": 0, - "status_errno": 0, - "restarts": 1, - "mem_current": uint64(1000), - "mem_peak": uint64(2000), - "swap_current": uint64(3000), - "swap_peak": uint64(4000), - "mem_avail": uint64(5000), - "pid": 9999, + "load_code": 0, + "active_code": 0, + "sub_code": 0, + "status_errno": 0, + "restarts": 1, + "mem_current": uint64(1000), + "mem_peak": uint64(2000), + "swap_current": uint64(3000), + "swap_peak": uint64(4000), + "mem_avail": uint64(5000), + "pid": 9999, + "active_enter_timestamp_us": 
uint64(enter), }, time.Unix(0, 0), ), @@ -355,8 +357,9 @@ func TestShow(t *testing.T) { ActiveState: "active", SubState: "exited", }, - ufPreset: "disabled", - ufState: "enabled", + ufPreset: "disabled", + ufState: "enabled", + ufActiveEnter: 0, properties: map[string]interface{}{ "Id": "example.service", "StatusErrno": 0, @@ -376,16 +379,17 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 0, - "active_code": 0, - "sub_code": 4, - "status_errno": 0, - "restarts": 0, - "mem_current": uint64(0), - "mem_peak": uint64(0), - "swap_current": uint64(0), - "swap_peak": uint64(0), - "mem_avail": uint64(0), + "load_code": 0, + "active_code": 0, + "sub_code": 4, + "status_errno": 0, + "restarts": 0, + "mem_current": uint64(0), + "mem_peak": uint64(0), + "swap_current": uint64(0), + "swap_peak": uint64(0), + "mem_avail": uint64(0), + "active_enter_timestamp_us": uint64(0), }, time.Unix(0, 0), ), @@ -402,8 +406,9 @@ func TestShow(t *testing.T) { ActiveState: "failed", SubState: "failed", }, - ufPreset: "disabled", - ufState: "enabled", + ufPreset: "disabled", + ufState: "enabled", + ufActiveEnter: uint64(enter), properties: map[string]interface{}{ "Id": "example.service", "StatusErrno": 10, @@ -428,16 +433,17 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 0, - "active_code": 3, - "sub_code": 12, - "status_errno": 10, - "restarts": 1, - "mem_current": uint64(1000), - "mem_peak": uint64(2000), - "swap_current": uint64(3000), - "swap_peak": uint64(4000), - "mem_avail": uint64(5000), + "load_code": 0, + "active_code": 3, + "sub_code": 12, + "status_errno": 10, + "restarts": 1, + "mem_current": uint64(1000), + "mem_peak": uint64(2000), + "swap_current": uint64(3000), + "swap_peak": uint64(4000), + "mem_avail": uint64(5000), + "active_enter_timestamp_us": uint64(enter), }, time.Unix(0, 0), ), @@ -454,8 +460,9 @@ func TestShow(t *testing.T) { ActiveState: "inactive", SubState: "dead", }, 
- ufPreset: "disabled", - ufState: "enabled", + ufPreset: "disabled", + ufState: "enabled", + ufActiveEnter: uint64(0), properties: map[string]interface{}{ "Id": "example.service", }, @@ -473,14 +480,15 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 2, - "active_code": 2, - "sub_code": 1, - "mem_current": uint64(0), - "mem_peak": uint64(0), - "swap_current": uint64(0), - "swap_peak": uint64(0), - "mem_avail": uint64(0), + "load_code": 2, + "active_code": 2, + "sub_code": 1, + "mem_current": uint64(0), + "mem_peak": uint64(0), + "swap_current": uint64(0), + "swap_peak": uint64(0), + "mem_avail": uint64(0), + "active_enter_timestamp_us": uint64(0), }, time.Unix(0, 0), ), @@ -517,8 +525,9 @@ func TestShow(t *testing.T) { ActiveState: "inactive", SubState: "dead", }, - ufPreset: "disabled", - ufState: "disabled", + ufPreset: "disabled", + ufState: "disabled", + ufActiveEnter: uint64(0), properties: map[string]interface{}{ "Id": "example.service", "StatusErrno": 0, @@ -543,16 +552,17 @@ func TestShow(t *testing.T) { "preset": "disabled", }, map[string]interface{}{ - "load_code": 0, - "active_code": int64(2), - "sub_code": 1, - "status_errno": 0, - "restarts": 0, - "mem_current": uint64(0), - "mem_peak": uint64(0), - "swap_current": uint64(0), - "swap_peak": uint64(0), - "mem_avail": uint64(0), + "load_code": 0, + "active_code": int64(2), + "sub_code": 1, + "status_errno": 0, + "restarts": 0, + "mem_current": uint64(0), + "mem_peak": uint64(0), + "swap_current": uint64(0), + "swap_peak": uint64(0), + "mem_avail": uint64(0), + "active_enter_timestamp_us": uint64(0), }, time.Unix(0, 0), ), @@ -974,19 +984,17 @@ func (c *fakeClient) GetUnitTypePropertiesContext(_ context.Context, unit, unitT return u.properties, nil } -func (c *fakeClient) GetUnitPropertyContext(_ context.Context, unit, propertyName string) (*sdbus.Property, error) { +func (c *fakeClient) GetUnitPropertiesContext(_ context.Context, unit string) 
(map[string]interface{}, error) { u, found := c.units[unit] if !found { return nil, nil } - switch propertyName { - case "UnitFileState": - return &sdbus.Property{Name: propertyName, Value: dbus.MakeVariant(u.ufState)}, nil - case "UnitFilePreset": - return &sdbus.Property{Name: propertyName, Value: dbus.MakeVariant(u.ufPreset)}, nil - } - return nil, errors.New("unknown property") + return map[string]interface{}{ + "UnitFileState": u.ufState, + "UnitFilePreset": u.ufPreset, + "ActiveEnterTimestamp": u.ufActiveEnter, + }, nil } func (c *fakeClient) ListUnitsContext(_ context.Context) ([]sdbus.UnitStatus, error) { From 7ca41797b25870eab654a2a8ceacba823abc17b6 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 11 Dec 2024 22:33:31 +0100 Subject: [PATCH 150/170] fix(agent): Skip initialization of second processor state if requested (#16290) --- agent/agent.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 7f00fc6ca9ff5..ed19d3f764c38 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -231,10 +231,12 @@ func (a *Agent) InitPlugins() error { return fmt.Errorf("could not initialize aggregator %s: %w", aggregator.LogName(), err) } } - for _, processor := range a.Config.AggProcessors { - err := processor.Init() - if err != nil { - return fmt.Errorf("could not initialize processor %s: %w", processor.LogName(), err) + if !a.Config.Agent.SkipProcessorsAfterAggregators { + for _, processor := range a.Config.AggProcessors { + err := processor.Init() + if err != nil { + return fmt.Errorf("could not initialize processor %s: %w", processor.LogName(), err) + } } } for _, output := range a.Config.Outputs { From 56c3b19ece4711e34acb7b42cb8c277f5b11c44b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 10:45:53 -0600 Subject: [PATCH 151/170] chore(deps): Bump golang.org/x/crypto from 0.29.0 to 
0.31.0 (#16297) --- go.mod | 10 +++++----- go.sum | 12 ++++++++++-- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index c0b8ec2f444e6..37bb5d66d04af 100644 --- a/go.mod +++ b/go.mod @@ -212,14 +212,14 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 go.starlark.net v0.0.0-20240925182052-1207426daebd go.step.sm/crypto v0.54.0 - golang.org/x/crypto v0.29.0 + golang.org/x/crypto v0.31.0 golang.org/x/mod v0.21.0 golang.org/x/net v0.31.0 golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.9.0 - golang.org/x/sys v0.27.0 - golang.org/x/term v0.26.0 - golang.org/x/text v0.20.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/term v0.27.0 + golang.org/x/text v0.21.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 gonum.org/v1/gonum v0.15.1 google.golang.org/api v0.203.0 diff --git a/go.sum b/go.sum index 75fee2cd2ac48..5c3e20e416695 100644 --- a/go.sum +++ b/go.sum @@ -2563,6 +2563,8 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2765,8 +2767,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2906,6 +2908,8 @@ golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2926,6 +2930,8 @@ golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/term 
v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2948,6 +2954,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 1e2f815d2af7cc2dee2fd8ee332a8f1dd935cb90 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 13 Dec 2024 17:46:37 +0100 Subject: [PATCH 152/170] chore(agent): Add warning about changing default for 'skip_processors_after_aggregators' (#16302) --- CHANGELOG.md | 10 ++++++++++ agent/agent.go | 36 ++++++++++++++++++++++++++++++++---- config/config.go | 2 +- 3 files changed, 43 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a022cbf4d0dac..0d73b17fcc318 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,16 @@ # Changelog +## Unreleased + +### Important Changes + +- The default value of `skip_processors_after_aggregators` will change to `true` + with 
Telegraf `v1.40.0`, skip running the processors again after aggregators! + If you need the current default behavior, please explicitly set the option to + `false`! To silence the warning and use the future default behavior, please + explicitly set the option to `true`. + ## v1.33.0 [2024-12-09] ### New Plugins diff --git a/agent/agent.go b/agent/agent.go index ed19d3f764c38..d4c284ac905a6 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "github.com/fatih/color" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -106,6 +107,15 @@ func (a *Agent) Run(ctx context.Context) error { time.Duration(a.Config.Agent.Interval), a.Config.Agent.Quiet, a.Config.Agent.Hostname, time.Duration(a.Config.Agent.FlushInterval)) + // Set the default for processor skipping + if a.Config.Agent.SkipProcessorsAfterAggregators == nil { + msg := `The default value of 'skip_processors_after_aggregators' will change to 'true' with Telegraf v1.40.0! ` + msg += `If you need the current default behavior, please explicitly set the option to 'false'!` + log.Print("W! [agent] ", color.YellowString(msg)) + skipProcessorsAfterAggregators := false + a.Config.Agent.SkipProcessorsAfterAggregators = &skipProcessorsAfterAggregators + } + log.Printf("D! 
[agent] Initializing plugins") if err := a.InitPlugins(); err != nil { return err @@ -136,7 +146,7 @@ func (a *Agent) Run(ctx context.Context) error { var au *aggregatorUnit if len(a.Config.Aggregators) != 0 { aggC := next - if len(a.Config.AggProcessors) != 0 && !a.Config.Agent.SkipProcessorsAfterAggregators { + if len(a.Config.AggProcessors) != 0 && !*a.Config.Agent.SkipProcessorsAfterAggregators { aggC, apu, err = a.startProcessors(next, a.Config.AggProcessors) if err != nil { return err @@ -231,7 +241,7 @@ func (a *Agent) InitPlugins() error { return fmt.Errorf("could not initialize aggregator %s: %w", aggregator.LogName(), err) } } - if !a.Config.Agent.SkipProcessorsAfterAggregators { + if !*a.Config.Agent.SkipProcessorsAfterAggregators { for _, processor := range a.Config.AggProcessors { err := processor.Init() if err != nil { @@ -1000,6 +1010,15 @@ func (a *Agent) Test(ctx context.Context, wait time.Duration) error { // outputC. After gathering pauses for the wait duration to allow service // inputs to run. func (a *Agent) runTest(ctx context.Context, wait time.Duration, outputC chan<- telegraf.Metric) error { + // Set the default for processor skipping + if a.Config.Agent.SkipProcessorsAfterAggregators == nil { + msg := `The default value of 'skip_processors_after_aggregators' will change to 'true' with Telegraf v1.40.0! ` + msg += `If you need the current default behavior, please explicitly set the option to 'false'!` + log.Print("W! [agent] ", color.YellowString(msg)) + skipProcessorsAfterAggregators := false + a.Config.Agent.SkipProcessorsAfterAggregators = &skipProcessorsAfterAggregators + } + log.Printf("D! 
[agent] Initializing plugins") if err := a.InitPlugins(); err != nil { return err @@ -1013,7 +1032,7 @@ func (a *Agent) runTest(ctx context.Context, wait time.Duration, outputC chan<- var au *aggregatorUnit if len(a.Config.Aggregators) != 0 { procC := next - if len(a.Config.AggProcessors) != 0 && !a.Config.Agent.SkipProcessorsAfterAggregators { + if len(a.Config.AggProcessors) != 0 && !*a.Config.Agent.SkipProcessorsAfterAggregators { var err error procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) if err != nil { @@ -1096,6 +1115,15 @@ func (a *Agent) Once(ctx context.Context, wait time.Duration) error { // outputC. After gathering pauses for the wait duration to allow service // inputs to run. func (a *Agent) runOnce(ctx context.Context, wait time.Duration) error { + // Set the default for processor skipping + if a.Config.Agent.SkipProcessorsAfterAggregators == nil { + msg := `The default value of 'skip_processors_after_aggregators' will change to 'true' with Telegraf v1.40.0! ` + msg += `If you need the current default behavior, please explicitly set the option to 'false'!` + log.Print("W! [agent] ", color.YellowString(msg)) + skipProcessorsAfterAggregators := false + a.Config.Agent.SkipProcessorsAfterAggregators = &skipProcessorsAfterAggregators + } + log.Printf("D! 
[agent] Initializing plugins") if err := a.InitPlugins(); err != nil { return err @@ -1113,7 +1141,7 @@ func (a *Agent) runOnce(ctx context.Context, wait time.Duration) error { var au *aggregatorUnit if len(a.Config.Aggregators) != 0 { procC := next - if len(a.Config.AggProcessors) != 0 && !a.Config.Agent.SkipProcessorsAfterAggregators { + if len(a.Config.AggProcessors) != 0 && !*a.Config.Agent.SkipProcessorsAfterAggregators { procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) if err != nil { return err diff --git a/config/config.go b/config/config.go index 6a71646b095da..4790f03e82e18 100644 --- a/config/config.go +++ b/config/config.go @@ -279,7 +279,7 @@ type AgentConfig struct { // Flag to skip running processors after aggregators // By default, processors are run a second time after aggregators. Changing // this setting to true will skip the second run of processors. - SkipProcessorsAfterAggregators bool `toml:"skip_processors_after_aggregators"` + SkipProcessorsAfterAggregators *bool `toml:"skip_processors_after_aggregators"` // Number of attempts to obtain a remote configuration via a URL during // startup. Set to -1 for unlimited attempts. 
From 531fb0589af2397cd0a5d8696aed865db187552a Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 13 Dec 2024 18:23:50 +0100 Subject: [PATCH 153/170] chore(processors.unpivot): Cleanup code and improve performance (#16299) --- plugins/processors/unpivot/unpivot.go | 45 +++---- plugins/processors/unpivot/unpivot_test.go | 147 +++++++++++++++------ 2 files changed, 124 insertions(+), 68 deletions(-) diff --git a/plugins/processors/unpivot/unpivot.go b/plugins/processors/unpivot/unpivot.go index 3f41d6bb7b9ba..53bfb25aebe27 100644 --- a/plugins/processors/unpivot/unpivot.go +++ b/plugins/processors/unpivot/unpivot.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/processors" ) @@ -18,30 +19,15 @@ type Unpivot struct { ValueKey string `toml:"value_key"` } -func copyWithoutFields(metric telegraf.Metric) telegraf.Metric { - m := metric.Copy() - - fieldKeys := make([]string, 0, len(m.FieldList())) - for _, field := range m.FieldList() { - fieldKeys = append(fieldKeys, field.Key) - } - - for _, fk := range fieldKeys { - m.RemoveField(fk) - } - - return m -} - func (*Unpivot) SampleConfig() string { return sampleConfig } func (p *Unpivot) Init() error { switch p.FieldNameAs { - case "metric": case "", "tag": p.FieldNameAs = "tag" + case "metric": default: return fmt.Errorf("unrecognized metric mode: %q", p.FieldNameAs) } @@ -63,27 +49,28 @@ func (p *Unpivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric { } results := make([]telegraf.Metric, 0, fieldCount) - for _, m := range metrics { - base := m - if wm, ok := m.(telegraf.UnwrappableMetric); ok { - base = wm.Unwrap() + for _, src := range metrics { + // Create a copy without fields and tracking information + base := metric.New(src.Name(), make(map[string]string), make(map[string]interface{}), src.Time()) + for _, t := range src.TagList() { + base.AddTag(t.Key, t.Value) } - 
base = copyWithoutFields(base) - for _, field := range m.FieldList() { - newMetric := base.Copy() - newMetric.AddField(p.ValueKey, field.Value) + // Create a new metric per field and add it to the output + for _, field := range src.FieldList() { + m := base.Copy() + m.AddField(p.ValueKey, field.Value) switch p.FieldNameAs { case "metric": - newMetric.SetName(field.Key) - case "", "tag": - newMetric.AddTag(p.TagKey, field.Key) + m.SetName(field.Key) + case "tag": + m.AddTag(p.TagKey, field.Key) } - results = append(results, newMetric) + results = append(results, m) } - m.Accept() + src.Accept() } return results } diff --git a/plugins/processors/unpivot/unpivot_test.go b/plugins/processors/unpivot/unpivot_test.go index 0152513159ad8..b44632db3ec1b 100644 --- a/plugins/processors/unpivot/unpivot_test.go +++ b/plugins/processors/unpivot/unpivot_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestUnpivot_defaults(t *testing.T) { +func TestDefaults(t *testing.T) { unpivot := &Unpivot{} require.NoError(t, unpivot.Init()) require.Equal(t, "tag", unpivot.FieldNameAs) @@ -20,25 +20,25 @@ func TestUnpivot_defaults(t *testing.T) { require.Equal(t, "value", unpivot.ValueKey) } -func TestUnpivot_invalidMetricMode(t *testing.T) { +func TestInvalidMetricMode(t *testing.T) { unpivot := &Unpivot{FieldNameAs: "unknown"} require.Error(t, unpivot.Init()) } -func TestUnpivot_originalMode(t *testing.T) { +func TestOriginalMode(t *testing.T) { now := time.Now() tests := []struct { name string - unpivot *Unpivot + tagKey string + valueKey string + metrics []telegraf.Metric expected []telegraf.Metric }{ { - name: "simple", - unpivot: &Unpivot{ - TagKey: "name", - ValueKey: "value", - }, + name: "simple", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -61,11 +61,9 @@ func TestUnpivot_originalMode(t *testing.T) { }, }, { - name: "multi fields", - unpivot: &Unpivot{ - TagKey: "name", - 
ValueKey: "value", - }, + name: "multi fields", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -100,27 +98,33 @@ func TestUnpivot_originalMode(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - actual := tt.unpivot.Apply(tt.metrics...) + plugin := &Unpivot{ + TagKey: tt.tagKey, + ValueKey: tt.valueKey, + } + require.NoError(t, plugin.Init()) + + actual := plugin.Apply(tt.metrics...) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.SortMetrics()) }) } } -func TestUnpivot_fieldMode(t *testing.T) { +func TestFieldMode(t *testing.T) { now := time.Now() tests := []struct { - name string - unpivot *Unpivot - metrics []telegraf.Metric - expected []telegraf.Metric + name string + fieldNameAs string + tagKey string + valueKey string + metrics []telegraf.Metric + expected []telegraf.Metric }{ { - name: "simple", - unpivot: &Unpivot{ - FieldNameAs: "metric", - TagKey: "name", - ValueKey: "value", - }, + name: "simple", + fieldNameAs: "metric", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -141,12 +145,10 @@ func TestUnpivot_fieldMode(t *testing.T) { }, }, { - name: "multi fields", - unpivot: &Unpivot{ - FieldNameAs: "metric", - TagKey: "name", - ValueKey: "value", - }, + name: "multi fields", + fieldNameAs: "metric", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -175,12 +177,10 @@ func TestUnpivot_fieldMode(t *testing.T) { }, }, { - name: "multi fields and tags", - unpivot: &Unpivot{ - FieldNameAs: "metric", - TagKey: "name", - ValueKey: "value", - }, + name: "multi fields and tags", + fieldNameAs: "metric", + tagKey: "name", + valueKey: "value", metrics: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{ @@ -217,7 +217,14 @@ func TestUnpivot_fieldMode(t *testing.T) { } for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { - actual := tt.unpivot.Apply(tt.metrics...) + plugin := &Unpivot{ + FieldNameAs: tt.fieldNameAs, + TagKey: tt.tagKey, + ValueKey: tt.valueKey, + } + require.NoError(t, plugin.Init()) + + actual := plugin.Apply(tt.metrics...) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.SortMetrics()) }) } @@ -247,6 +254,8 @@ func TestTrackedMetricNotLost(t *testing.T) { // Process expected metrics and compare with resulting metrics plugin := &Unpivot{TagKey: "name", ValueKey: "value"} + require.NoError(t, plugin.Init()) + actual := plugin.Apply(input...) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) @@ -262,3 +271,63 @@ func TestTrackedMetricNotLost(t *testing.T) { return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(input)) } + +func BenchmarkAsTag(b *testing.B) { + input := metric.New( + "test", + map[string]string{ + "source": "device A", + "location": "main building", + }, + map[string]interface{}{ + "field0": 0.1, + "field1": 1.2, + "field2": 2.3, + "field3": 3.4, + "field4": 4.5, + "field5": 5.6, + "field6": 6.7, + "field7": 7.8, + "field8": 8.9, + "field9": 9.0, + }, + time.Now(), + ) + + plugin := &Unpivot{} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + plugin.Apply(input) + } +} + +func BenchmarkAsMetric(b *testing.B) { + input := metric.New( + "test", + map[string]string{ + "source": "device A", + "location": "main building", + }, + map[string]interface{}{ + "field0": 0.1, + "field1": 1.2, + "field2": 2.3, + "field3": 3.4, + "field4": 4.5, + "field5": 5.6, + "field6": 6.7, + "field7": 7.8, + "field8": 8.9, + "field9": 9.0, + }, + time.Now(), + ) + + plugin := &Unpivot{FieldNameAs: "metric"} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + plugin.Apply(input) + } +} From d0a77cb64ef04798225a288d088f07ababf4ff24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= 
Date: Fri, 13 Dec 2024 18:26:34 +0100 Subject: [PATCH 154/170] chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[a-e]` (#16263) --- plugins/inputs/aerospike/aerospike.go | 31 ++++----- plugins/inputs/aerospike/aerospike_test.go | 20 ++---- plugins/inputs/aliyuncms/aliyuncms_test.go | 2 +- plugins/inputs/amd_rocm_smi/amd_rocm_smi.go | 2 +- plugins/inputs/amqp_consumer/amqp_consumer.go | 6 +- plugins/inputs/apache/apache.go | 4 +- plugins/inputs/azure_monitor/azure_monitor.go | 4 +- .../azure_monitor/azure_monitor_test.go | 10 +-- plugins/inputs/bcache/bcache.go | 4 +- plugins/inputs/bond/bond.go | 4 +- plugins/inputs/bond/bond_test.go | 4 +- plugins/inputs/burrow/burrow.go | 8 +-- .../cisco_telemetry_mdt.go | 10 +-- plugins/inputs/clickhouse/clickhouse.go | 22 +++---- plugins/inputs/cloud_pubsub/cloud_pubsub.go | 2 +- .../cloud_pubsub_push/cloud_pubsub_push.go | 2 +- .../cloud_pubsub_push_test.go | 6 +- plugins/inputs/cloudwatch/cloudwatch_test.go | 20 +++--- .../cloudwatch_metric_streams.go | 2 +- plugins/inputs/couchdb/couchdb.go | 64 +++++++++---------- plugins/inputs/csgo/csgo.go | 4 +- .../inputs/ctrlx_datalayer/ctrlx_datalayer.go | 2 +- plugins/inputs/dcos/client.go | 4 +- plugins/inputs/dcos/creds.go | 6 +- plugins/inputs/dcos/dcos.go | 24 +++---- plugins/inputs/dcos/dcos_test.go | 9 +-- plugins/inputs/docker_log/docker_log.go | 2 +- plugins/inputs/docker_log/docker_log_test.go | 2 +- plugins/inputs/dovecot/dovecot.go | 4 +- plugins/inputs/ecs/ecs.go | 4 +- plugins/inputs/elasticsearch/elasticsearch.go | 2 +- .../elasticsearch_query.go | 2 +- plugins/inputs/ethtool/ethtool_linux.go | 6 +- plugins/inputs/ethtool/ethtool_test.go | 10 +-- plugins/inputs/exec/exec.go | 4 +- plugins/inputs/exec/exec_test.go | 3 +- .../{run_notwinodws.go => run_notwindows.go} | 2 +- plugins/inputs/exec/run_windows.go | 2 +- plugins/inputs/execd/execd_test.go | 6 +- plugins/inputs/execd/shim/input.go | 6 +- plugins/inputs/execd/shim/shim_test.go | 22 
++----- 41 files changed, 168 insertions(+), 185 deletions(-) rename plugins/inputs/exec/{run_notwinodws.go => run_notwindows.go} (96%) diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 26a5677075d63..52732cff85cdc 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -121,11 +121,11 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro nodes := c.GetNodes() for _, n := range nodes { nodeHost := n.GetHost().String() - stats, err := a.getNodeInfo(n, asInfoPolicy) + stats, err := getNodeInfo(n, asInfoPolicy) if err != nil { return err } - a.parseNodeInfo(acc, stats, nodeHost, n.GetName()) + parseNodeInfo(acc, stats, nodeHost, n.GetName()) namespaces, err := a.getNamespaces(n, asInfoPolicy) if err != nil { @@ -135,12 +135,12 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro if !a.DisableQueryNamespaces { // Query Namespaces for _, namespace := range namespaces { - stats, err = a.getNamespaceInfo(namespace, n, asInfoPolicy) + stats, err = getNamespaceInfo(namespace, n, asInfoPolicy) if err != nil { continue } - a.parseNamespaceInfo(acc, stats, nodeHost, namespace, n.GetName()) + parseNamespaceInfo(acc, stats, nodeHost, namespace, n.GetName()) if a.EnableTTLHistogram { err = a.getTTLHistogram(acc, nodeHost, namespace, "", n, asInfoPolicy) @@ -162,12 +162,12 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro if err == nil { for _, namespaceSet := range namespaceSets { namespace, set := splitNamespaceSet(namespaceSet) - stats, err := a.getSetInfo(namespaceSet, n, asInfoPolicy) + stats, err := getSetInfo(namespaceSet, n, asInfoPolicy) if err != nil { continue } - a.parseSetInfo(acc, stats, nodeHost, namespaceSet, n.GetName()) + parseSetInfo(acc, stats, nodeHost, namespaceSet, n.GetName()) if a.EnableTTLHistogram { err = a.getTTLHistogram(acc, nodeHost, namespace, set, n, asInfoPolicy) @@ 
-189,7 +189,7 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro return nil } -func (a *Aerospike) getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { +func getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { stats, err := n.RequestInfo(infoPolicy, "statistics") if err != nil { return nil, err @@ -198,7 +198,7 @@ func (a *Aerospike) getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[stri return stats, nil } -func (a *Aerospike) parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, nodeName string) { +func parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, nodeName string) { nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -231,7 +231,7 @@ func (a *Aerospike) getNamespaces(n *as.Node, infoPolicy *as.InfoPolicy) ([]stri return namespaces, nil } -func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { +func getNamespaceInfo(namespace string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { stats, err := n.RequestInfo(infoPolicy, "namespace/"+namespace) if err != nil { return nil, err @@ -239,7 +239,8 @@ func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node, infoPolicy *a return stats, err } -func (a *Aerospike) parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespace, nodeName string) { + +func parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespace, nodeName string) { nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -296,7 +297,7 @@ func (a *Aerospike) getSets(n *as.Node, infoPolicy *as.InfoPolicy) ([]string, er return namespaceSets, nil } -func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { +func getSetInfo(namespaceSet string, n 
*as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { stats, err := n.RequestInfo(infoPolicy, "sets/"+namespaceSet) if err != nil { return nil, err @@ -304,7 +305,7 @@ func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.I return stats, nil } -func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespaceSet, nodeName string) { +func parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespaceSet, nodeName string) { stat := strings.Split( strings.TrimSuffix( stats["sets/"+namespaceSet], ";"), ":") @@ -327,7 +328,7 @@ func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]stri } func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort, namespace, set string, n *as.Node, infoPolicy *as.InfoPolicy) error { - stats, err := a.getHistogram(namespace, set, "ttl", n, infoPolicy) + stats, err := getHistogram(namespace, set, "ttl", n, infoPolicy) if err != nil { return err } @@ -339,7 +340,7 @@ func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort, namespac } func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostPort, namespace, set string, n *as.Node, infoPolicy *as.InfoPolicy) error { - stats, err := a.getHistogram(namespace, set, "object-size-linear", n, infoPolicy) + stats, err := getHistogram(namespace, set, "object-size-linear", n, infoPolicy) if err != nil { return err } @@ -350,7 +351,7 @@ func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostP return nil } -func (a *Aerospike) getHistogram(namespace, set, histogramType string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { +func getHistogram(namespace, set, histogramType string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { var queryArg string if len(set) > 0 { queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v;set=%v", histogramType, namespace, set) diff --git 
a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index 1f3090f7a23b9..73d7b977dab72 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -309,9 +309,6 @@ func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) { } func TestParseNodeInfo(t *testing.T) { - a := &Aerospike{} - var acc testutil.Accumulator - stats := map[string]string{ "statistics": "early_tsvc_from_proxy_error=0;cluster_principal=BB9020012AC4202;cluster_is_member=true", } @@ -327,14 +324,12 @@ func TestParseNodeInfo(t *testing.T) { "node_name": "TestNodeName", } - a.parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName") + var acc testutil.Accumulator + parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_node", expectedFields, expectedTags) } func TestParseNamespaceInfo(t *testing.T) { - a := &Aerospike{} - var acc testutil.Accumulator - stats := map[string]string{ "namespace/test": "ns_cluster_size=1;effective_replication_factor=1;objects=2;tombstones=0;master_objects=2", } @@ -353,15 +348,12 @@ func TestParseNamespaceInfo(t *testing.T) { "namespace": "test", } - a.parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName") + var acc testutil.Accumulator + parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_namespace", expectedFields, expectedTags) } func TestParseSetInfo(t *testing.T) { - a := &Aerospike{} - - var acc testutil.Accumulator - stats := map[string]string{ "sets/test/foo": "objects=1:tombstones=0:memory_data_bytes=26;", } @@ -377,7 +369,9 @@ func TestParseSetInfo(t *testing.T) { "node_name": "TestNodeName", "set": "test/foo", } - a.parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName") + + var acc testutil.Accumulator + parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName") acc.AssertContainsTaggedFields(t, 
"aerospike_set", expectedFields, expectedTags) } diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index 4042baf1b0237..83dfafad02e3e 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -26,7 +26,7 @@ const inputTitle = "inputs.aliyuncms" type mockGatherAliyunCMSClient struct{} -func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) { +func (*mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) { resp := new(cms.DescribeMetricListResponse) // switch request.Metric { diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go index b30a8b1b80871..a6fe8b7780d3e 100644 --- a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go @@ -131,7 +131,7 @@ func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error { return gatherROCmSMI(data, acc) } -func (rsmi *ROCmSMI) Stop() {} +func (*ROCmSMI) Stop() {} func (rsmi *ROCmSMI) pollROCmSMI() ([]byte, error) { // Construct and execute metrics query, there currently exist (ROCm v4.3.x) a "-a" option diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 448efad87d987..e5a32eab5b264 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -64,11 +64,11 @@ type AMQPConsumer struct { decoder internal.ContentDecoder } -func (a *externalAuth) Mechanism() string { +func (*externalAuth) Mechanism() string { return "EXTERNAL" } -func (a *externalAuth) Response() string { +func (*externalAuth) Response() string { return "\000" } @@ -175,7 +175,7 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error { 
+func (*AMQPConsumer) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index 6263404faa549..cb53f2b668ad5 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -120,7 +120,7 @@ func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { switch key { case "Scoreboard": - for field, value := range n.gatherScores(part) { + for field, value := range gatherScores(part) { fields[field] = value } default: @@ -137,7 +137,7 @@ func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { return nil } -func (n *Apache) gatherScores(data string) map[string]interface{} { +func gatherScores(data string) map[string]interface{} { var waiting, open = 0, 0 var s, r, w, k, d, c, l, g, i = 0, 0, 0, 0, 0, 0, 0, 0, 0 diff --git a/plugins/inputs/azure_monitor/azure_monitor.go b/plugins/inputs/azure_monitor/azure_monitor.go index 93b3627bf47d4..a53c8710e40d4 100644 --- a/plugins/inputs/azure_monitor/azure_monitor.go +++ b/plugins/inputs/azure_monitor/azure_monitor.go @@ -58,7 +58,7 @@ type azureClientsCreator interface { //go:embed sample.conf var sampleConfig string -func (am *AzureMonitor) SampleConfig() string { +func (*AzureMonitor) SampleConfig() string { return sampleConfig } @@ -170,7 +170,7 @@ func (am *AzureMonitor) setReceiver() error { return err } -func (acm *azureClientsManager) createAzureClients( +func (*azureClientsManager) createAzureClients( subscriptionID, clientID, clientSecret, tenantID string, clientOptions azcore.ClientOptions, ) (*receiver.AzureClients, error) { diff --git a/plugins/inputs/azure_monitor/azure_monitor_test.go b/plugins/inputs/azure_monitor/azure_monitor_test.go index e51b616baad0b..421b9282493b9 100644 --- a/plugins/inputs/azure_monitor/azure_monitor_test.go +++ b/plugins/inputs/azure_monitor/azure_monitor_test.go @@ -27,7 +27,7 @@ type mockAzureMetricDefinitionsClient struct{} type mockAzureMetricsClient 
struct{} -func (mam *mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azcore.ClientOptions) (*receiver.AzureClients, error) { +func (*mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azcore.ClientOptions) (*receiver.AzureClients, error) { return &receiver.AzureClients{ Ctx: context.Background(), ResourcesClient: &mockAzureResourcesClient{}, @@ -36,7 +36,7 @@ func (mam *mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azco }, nil } -func (marc *mockAzureResourcesClient) List(_ context.Context, _ *armresources.ClientListOptions) ([]*armresources.ClientListResponse, error) { +func (*mockAzureResourcesClient) List(_ context.Context, _ *armresources.ClientListOptions) ([]*armresources.ClientListResponse, error) { var responses []*armresources.ClientListResponse file, err := os.ReadFile("testdata/json/azure_resources_response.json") @@ -59,7 +59,7 @@ func (marc *mockAzureResourcesClient) List(_ context.Context, _ *armresources.Cl return responses, nil } -func (marc *mockAzureResourcesClient) ListByResourceGroup( +func (*mockAzureResourcesClient) ListByResourceGroup( _ context.Context, resourceGroup string, _ *armresources.ClientListByResourceGroupOptions) ([]*armresources.ClientListByResourceGroupResponse, error) { @@ -105,7 +105,7 @@ func (marc *mockAzureResourcesClient) ListByResourceGroup( return nil, errors.New("resource group was not found") } -func (mamdc *mockAzureMetricDefinitionsClient) List( +func (*mockAzureMetricDefinitionsClient) List( _ context.Context, resourceID string, _ *armmonitor.MetricDefinitionsClientListOptions) (armmonitor.MetricDefinitionsClientListResponse, error) { @@ -146,7 +146,7 @@ func (mamdc *mockAzureMetricDefinitionsClient) List( return armmonitor.MetricDefinitionsClientListResponse{}, errors.New("resource ID was not found") } -func (mamc *mockAzureMetricsClient) List( +func (*mockAzureMetricsClient) List( _ context.Context, resourceID string, _ *armmonitor.MetricsClientListOptions) 
(armmonitor.MetricsClientListResponse, error) { diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 37114a2d921a1..8a9c79fc19c22 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -53,7 +53,7 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error { continue } } - if err := b.gatherBcache(bdev, acc); err != nil { + if err := gatherBcache(bdev, acc); err != nil { return fmt.Errorf("gathering bcache failed: %w", err) } } @@ -97,7 +97,7 @@ func prettyToBytes(v string) uint64 { return uint64(result) } -func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { +func gatherBcache(bdev string, acc telegraf.Accumulator) error { tags := getTags(bdev) metrics, err := filepath.Glob(bdev + "/stats_total/*") if err != nil { diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index a5c244ad3ce3e..6fdb33ba6ab8b 100644 --- a/plugins/inputs/bond/bond.go +++ b/plugins/inputs/bond/bond.go @@ -66,7 +66,7 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error { if err != nil { acc.AddError(err) } - bond.gatherSysDetails(bondName, files, acc) + gatherSysDetails(bondName, files, acc) } } return nil @@ -164,7 +164,7 @@ func (bond *Bond) readSysFiles(bondDir string) (sysFiles, error) { return output, nil } -func (bond *Bond) gatherSysDetails(bondName string, files sysFiles, acc telegraf.Accumulator) { +func gatherSysDetails(bondName string, files sysFiles, acc telegraf.Accumulator) { var slaves []string var adPortCount int diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index 18d5c71ace644..17d91c640d498 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -145,7 +145,7 @@ func TestGatherBondInterface(t *testing.T) { acc = testutil.Accumulator{} require.NoError(t, bond.gatherBondInterface("bondLACP", sampleTestLACP, &acc)) - bond.gatherSysDetails("bondLACP", sysFiles{ModeFile: sampleSysMode, SlaveFile: 
sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) + gatherSysDetails("bondLACP", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bondLACP"}) acc.AssertContainsTaggedFields( t, @@ -169,7 +169,7 @@ func TestGatherBondInterface(t *testing.T) { acc = testutil.Accumulator{} require.NoError(t, bond.gatherBondInterface("bondLACPUpDown", sampleTestLACPFirstUpSecondDown, &acc)) - bond.gatherSysDetails("bondLACPUpDown", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) + gatherSysDetails("bondLACPUpDown", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bondLACPUpDown"}) acc.AssertContainsTaggedFields( t, diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go index 0cdf8a00bf8b9..05c2f84ed9f3b 100644 --- a/plugins/inputs/burrow/burrow.go +++ b/plugins/inputs/burrow/burrow.go @@ -289,14 +289,14 @@ func (b *Burrow) gatherTopics(guard chan struct{}, src *url.URL, cluster string, return } - b.genTopicMetrics(tr, cluster, topic, acc) + genTopicMetrics(tr, cluster, topic, acc) }(topic) } wg.Wait() } -func (b *Burrow) genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) { +func genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) { for i, offset := range r.Offsets { tags := map[string]string{ "cluster": cluster, @@ -346,7 +346,7 @@ func (b *Burrow) gatherGroups(guard chan struct{}, src *url.URL, cluster string, return } - b.genGroupStatusMetrics(gr, cluster, group, acc) + genGroupStatusMetrics(gr, cluster, group, acc) b.genGroupLagMetrics(gr, cluster, group, acc) }(group) } @@ -354,7 +354,7 @@ func (b *Burrow) gatherGroups(guard chan 
struct{}, src *url.URL, cluster string, wg.Wait() } -func (b *Burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) { +func genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) { partitionCount := r.Status.PartitionCount if partitionCount == 0 { partitionCount = len(r.Status.Partitions) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index b364c6e914f64..481e60a96a9c6 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -218,7 +218,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { return nil } -func (c *CiscoTelemetryMDT) Gather(_ telegraf.Accumulator) error { +func (*CiscoTelemetryMDT) Gather(telegraf.Accumulator) error { return nil } @@ -541,7 +541,7 @@ func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemet } } -func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, +func parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, encodingPath string, tags map[string]string, timestamp time.Time) { // RIB measurement := encodingPath @@ -574,7 +574,7 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem } } -func (c *CiscoTelemetryMDT) parseMicroburst(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, +func parseMicroburst(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, encodingPath string, tags map[string]string, timestamp time.Time) { var nxMicro *telemetry.TelemetryField var nxMicro1 *telemetry.TelemetryField @@ -623,12 +623,12 @@ func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGroup isDme := strings.Contains(encodingPath, "sys/") if encodingPath == "rib" { // handle native data path rib - c.parseRib(grouper, field, encodingPath, 
tags, timestamp) + parseRib(grouper, field, encodingPath, tags, timestamp) return } if encodingPath == "microburst" { // dump microburst - c.parseMicroburst(grouper, field, encodingPath, tags, timestamp) + parseMicroburst(grouper, field, encodingPath, tags, timestamp) return } if field == nil || !isDme || len(field.Fields) == 0 || len(field.Fields[0].Fields) == 0 || len(field.Fields[0].Fields[0].Fields) == 0 { diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index 73a8e39c623bb..11ec30251168c 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -210,7 +210,7 @@ func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, met Value float64 `json:"value"` } - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) fields := make(map[string]interface{}) if commonMetricsIsFloat[metric] { @@ -241,7 +241,7 @@ func (ch *ClickHouse) zookeeper(acc telegraf.Accumulator, conn *connect) error { if err := ch.execQuery(conn.url, systemZookeeperExistsSQL, &zkExists); err != nil { return err } - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) if len(zkExists) > 0 && zkExists[0].ZkExists > 0 { var zkRootNodes []struct { @@ -270,7 +270,7 @@ func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect) return err } - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) if len(replicationQueueExists) > 0 && replicationQueueExists[0].ReplicationQueueExists > 0 { var replicationTooManyTries []struct { @@ -301,7 +301,7 @@ func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) err } if len(detachedParts) > 0 { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) acc.AddFields("clickhouse_detached_parts", map[string]interface{}{ "detached_parts": uint64(detachedParts[0].DetachedParts), @@ -323,7 +323,7 @@ func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) erro } for 
_, dict := range brokenDictionaries { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) isLoaded := uint64(1) if dict.Status != "LOADED" { @@ -356,7 +356,7 @@ func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error { } if len(mutationsStatus) > 0 { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) acc.AddFields("clickhouse_mutations", map[string]interface{}{ @@ -384,7 +384,7 @@ func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error { } for _, disk := range disksStatus { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) tags["name"] = disk.Name tags["path"] = disk.Path @@ -413,7 +413,7 @@ func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error { } for _, process := range processesStats { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) tags["query_type"] = process.QueryType acc.AddFields("clickhouse_processes", @@ -448,7 +448,7 @@ func (ch *ClickHouse) textLog(acc telegraf.Accumulator, conn *connect) error { } for _, textLogItem := range textLogLast10MinMessages { - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) tags["level"] = textLogItem.Level acc.AddFields("clickhouse_text_log", map[string]interface{}{ @@ -473,7 +473,7 @@ func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error { if err := ch.execQuery(conn.url, systemPartsSQL, &parts); err != nil { return err } - tags := ch.makeDefaultTags(conn) + tags := makeDefaultTags(conn) for _, part := range parts { tags["table"] = part.Table @@ -490,7 +490,7 @@ func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error { return nil } -func (ch *ClickHouse) makeDefaultTags(conn *connect) map[string]string { +func makeDefaultTags(conn *connect) map[string]string { tags := map[string]string{ "source": conn.Hostname, } diff --git a/plugins/inputs/cloud_pubsub/cloud_pubsub.go b/plugins/inputs/cloud_pubsub/cloud_pubsub.go index 
0f686b3e7f0df..d91c55f6683bf 100644 --- a/plugins/inputs/cloud_pubsub/cloud_pubsub.go +++ b/plugins/inputs/cloud_pubsub/cloud_pubsub.go @@ -152,7 +152,7 @@ func (ps *PubSub) Start(ac telegraf.Accumulator) error { } // Gather does nothing for this service input. -func (ps *PubSub) Gather(_ telegraf.Accumulator) error { +func (*PubSub) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go index e745ee57eeb2c..d446d04e991bc 100644 --- a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go @@ -133,7 +133,7 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error { return nil } -func (p *PubSubPush) Gather(_ telegraf.Accumulator) error { +func (*PubSubPush) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go index 9e8aa07d1f3db..06d91190f97bc 100644 --- a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go @@ -219,7 +219,7 @@ func TestServeHTTP(t *testing.T) { type testMetricMaker struct{} -func (tm *testMetricMaker) Name() string { +func (*testMetricMaker) Name() string { return "TestPlugin" } @@ -227,11 +227,11 @@ func (tm *testMetricMaker) LogName() string { return tm.Name() } -func (tm *testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { +func (*testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { return metric } -func (tm *testMetricMaker) Log() telegraf.Logger { +func (*testMetricMaker) Log() telegraf.Logger { return logger.New("test", "test", "") } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 4cf11fe5955db..602da9b460c5f 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ 
b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -21,7 +21,7 @@ import ( type mockGatherCloudWatchClient struct{} -func (m *mockGatherCloudWatchClient) ListMetrics( +func (*mockGatherCloudWatchClient) ListMetrics( _ context.Context, params *cloudwatch.ListMetricsInput, _ ...func(*cloudwatch.Options), @@ -56,7 +56,7 @@ func (m *mockGatherCloudWatchClient) ListMetrics( return response, nil } -func (m *mockGatherCloudWatchClient) GetMetricData( +func (*mockGatherCloudWatchClient) GetMetricData( _ context.Context, params *cloudwatch.GetMetricDataInput, _ ...func(*cloudwatch.Options), @@ -307,10 +307,10 @@ func TestGather_MultipleNamespaces(t *testing.T) { type mockSelectMetricsCloudWatchClient struct{} -func (m *mockSelectMetricsCloudWatchClient) ListMetrics( - _ context.Context, - _ *cloudwatch.ListMetricsInput, - _ ...func(*cloudwatch.Options), +func (*mockSelectMetricsCloudWatchClient) ListMetrics( + context.Context, + *cloudwatch.ListMetricsInput, + ...func(*cloudwatch.Options), ) (*cloudwatch.ListMetricsOutput, error) { metrics := make([]types.Metric, 0) // 4 metrics are available @@ -358,10 +358,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics( return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricData( - _ context.Context, - _ *cloudwatch.GetMetricDataInput, - _ ...func(*cloudwatch.Options), +func (*mockSelectMetricsCloudWatchClient) GetMetricData( + context.Context, + *cloudwatch.GetMetricDataInput, + ...func(*cloudwatch.Options), ) (*cloudwatch.GetMetricDataOutput, error) { return nil, nil } diff --git a/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go index a453932859139..6e824f50634a1 100644 --- a/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go +++ b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go @@ -149,7 +149,7 @@ func (cms *CloudWatchMetricStreams) Start(acc telegraf.Accumulator) error 
{ return nil } -func (cms *CloudWatchMetricStreams) Gather(_ telegraf.Accumulator) error { +func (*CloudWatchMetricStreams) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index f24bd795621f3..d0917df11c039 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -207,43 +207,43 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri fields := make(map[string]interface{}, 31) // CouchDB meta stats: - c.generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses) - c.generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites) - c.generateFields(fields, "couchdb_open_databases", stats.Couchdb.OpenDatabases) - c.generateFields(fields, "couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits) - c.generateFields(fields, "couchdb_request_time", requestTime) - c.generateFields(fields, "couchdb_database_reads", stats.Couchdb.DatabaseReads) - c.generateFields(fields, "couchdb_open_os_files", stats.Couchdb.OpenOsFiles) + generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses) + generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites) + generateFields(fields, "couchdb_open_databases", stats.Couchdb.OpenDatabases) + generateFields(fields, "couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits) + generateFields(fields, "couchdb_request_time", requestTime) + generateFields(fields, "couchdb_database_reads", stats.Couchdb.DatabaseReads) + generateFields(fields, "couchdb_open_os_files", stats.Couchdb.OpenOsFiles) // http request methods stats: - c.generateFields(fields, "httpd_request_methods_put", httpdRequestMethodsPut) - c.generateFields(fields, "httpd_request_methods_get", httpdRequestMethodsGet) - c.generateFields(fields, "httpd_request_methods_copy", httpdRequestMethodsCopy) - c.generateFields(fields, "httpd_request_methods_delete", 
httpdRequestMethodsDelete) - c.generateFields(fields, "httpd_request_methods_post", httpdRequestMethodsPost) - c.generateFields(fields, "httpd_request_methods_head", httpdRequestMethodsHead) + generateFields(fields, "httpd_request_methods_put", httpdRequestMethodsPut) + generateFields(fields, "httpd_request_methods_get", httpdRequestMethodsGet) + generateFields(fields, "httpd_request_methods_copy", httpdRequestMethodsCopy) + generateFields(fields, "httpd_request_methods_delete", httpdRequestMethodsDelete) + generateFields(fields, "httpd_request_methods_post", httpdRequestMethodsPost) + generateFields(fields, "httpd_request_methods_head", httpdRequestMethodsHead) // status code stats: - c.generateFields(fields, "httpd_status_codes_200", httpdStatusCodesStatus200) - c.generateFields(fields, "httpd_status_codes_201", httpdStatusCodesStatus201) - c.generateFields(fields, "httpd_status_codes_202", httpdStatusCodesStatus202) - c.generateFields(fields, "httpd_status_codes_301", httpdStatusCodesStatus301) - c.generateFields(fields, "httpd_status_codes_304", httpdStatusCodesStatus304) - c.generateFields(fields, "httpd_status_codes_400", httpdStatusCodesStatus400) - c.generateFields(fields, "httpd_status_codes_401", httpdStatusCodesStatus401) - c.generateFields(fields, "httpd_status_codes_403", httpdStatusCodesStatus403) - c.generateFields(fields, "httpd_status_codes_404", httpdStatusCodesStatus404) - c.generateFields(fields, "httpd_status_codes_405", httpdStatusCodesStatus405) - c.generateFields(fields, "httpd_status_codes_409", httpdStatusCodesStatus409) - c.generateFields(fields, "httpd_status_codes_412", httpdStatusCodesStatus412) - c.generateFields(fields, "httpd_status_codes_500", httpdStatusCodesStatus500) + generateFields(fields, "httpd_status_codes_200", httpdStatusCodesStatus200) + generateFields(fields, "httpd_status_codes_201", httpdStatusCodesStatus201) + generateFields(fields, "httpd_status_codes_202", httpdStatusCodesStatus202) + generateFields(fields, 
"httpd_status_codes_301", httpdStatusCodesStatus301) + generateFields(fields, "httpd_status_codes_304", httpdStatusCodesStatus304) + generateFields(fields, "httpd_status_codes_400", httpdStatusCodesStatus400) + generateFields(fields, "httpd_status_codes_401", httpdStatusCodesStatus401) + generateFields(fields, "httpd_status_codes_403", httpdStatusCodesStatus403) + generateFields(fields, "httpd_status_codes_404", httpdStatusCodesStatus404) + generateFields(fields, "httpd_status_codes_405", httpdStatusCodesStatus405) + generateFields(fields, "httpd_status_codes_409", httpdStatusCodesStatus409) + generateFields(fields, "httpd_status_codes_412", httpdStatusCodesStatus412) + generateFields(fields, "httpd_status_codes_500", httpdStatusCodesStatus500) // httpd stats: - c.generateFields(fields, "httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges) - c.generateFields(fields, "httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads) - c.generateFields(fields, "httpd_requests", stats.Httpd.Requests) - c.generateFields(fields, "httpd_bulk_requests", stats.Httpd.BulkRequests) - c.generateFields(fields, "httpd_view_reads", stats.Httpd.ViewReads) + generateFields(fields, "httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges) + generateFields(fields, "httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads) + generateFields(fields, "httpd_requests", stats.Httpd.Requests) + generateFields(fields, "httpd_bulk_requests", stats.Httpd.BulkRequests) + generateFields(fields, "httpd_view_reads", stats.Httpd.ViewReads) tags := map[string]string{ "server": host, @@ -252,7 +252,7 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri return nil } -func (c *CouchDB) generateFields(fields map[string]interface{}, prefix string, obj metaData) { +func generateFields(fields map[string]interface{}, prefix string, obj metaData) { if obj.Value != nil { fields[prefix+"_value"] = *obj.Value } diff --git 
a/plugins/inputs/csgo/csgo.go b/plugins/inputs/csgo/csgo.go index ed91a39ceeb0a..718139c53822d 100644 --- a/plugins/inputs/csgo/csgo.go +++ b/plugins/inputs/csgo/csgo.go @@ -61,7 +61,7 @@ func (s *CSGO) Gather(acc telegraf.Accumulator) error { } // Generate the metric and add it to the accumulator - m, err := s.parseResponse(addr, response, t) + m, err := parseResponse(addr, response, t) if err != nil { acc.AddError(err) return @@ -74,7 +74,7 @@ func (s *CSGO) Gather(acc telegraf.Accumulator) error { return nil } -func (s *CSGO) parseResponse(addr, response string, t time.Time) (telegraf.Metric, error) { +func parseResponse(addr, response string, t time.Time) (telegraf.Metric, error) { rows := strings.Split(response, "\n") if len(rows) < 2 { return nil, errors.New("bad response") diff --git a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go index c6c4791597e39..b09cb4398b083 100644 --- a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go +++ b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go @@ -131,7 +131,7 @@ func (c *CtrlXDataLayer) Start(acc telegraf.Accumulator) error { return nil } -func (c *CtrlXDataLayer) Gather(_ telegraf.Accumulator) error { +func (*CtrlXDataLayer) Gather(telegraf.Accumulator) error { // Metrics are sent to the accumulator asynchronously in worker thread. So nothing to do here. 
return nil } diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index 1b1af7d818e69..d89a023d0798f 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -133,7 +133,7 @@ func (c *clusterClient) setToken(token string) { } func (c *clusterClient) login(ctx context.Context, sa *serviceAccount) (*authToken, error) { - token, err := c.createLoginToken(sa) + token, err := createLoginToken(sa) if err != nil { return nil, err } @@ -316,7 +316,7 @@ func (c *clusterClient) toURL(path string) string { return clusterURL.String() } -func (c *clusterClient) createLoginToken(sa *serviceAccount) (string, error) { +func createLoginToken(sa *serviceAccount) (string, error) { token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{ UID: sa.accountID, RegisteredClaims: jwt.RegisteredClaims{ diff --git a/plugins/inputs/dcos/creds.go b/plugins/inputs/dcos/creds.go index 411c3c7329174..8b195bc98ba94 100644 --- a/plugins/inputs/dcos/creds.go +++ b/plugins/inputs/dcos/creds.go @@ -59,14 +59,14 @@ func (c *tokenCreds) token(_ context.Context, _ client) (string, error) { return token, nil } -func (c *tokenCreds) isExpired() bool { +func (*tokenCreds) isExpired() bool { return true } -func (c *nullCreds) token(_ context.Context, _ client) (string, error) { +func (*nullCreds) token(context.Context, client) (string, error) { return "", nil } -func (c *nullCreds) isExpired() bool { +func (*nullCreds) isExpired() bool { return true } diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 1b099fb3b1ef4..098954b177fcc 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -131,7 +131,7 @@ func (d *DCOS) gatherNode(ctx context.Context, acc telegraf.Accumulator, cluster acc.AddError(err) return } - d.addNodeMetrics(acc, cluster, m) + addNodeMetrics(acc, cluster, m) }() d.gatherContainers(ctx, acc, cluster, node) @@ -160,7 +160,7 @@ func (d *DCOS) gatherContainers(ctx context.Context, acc 
telegraf.Accumulator, c acc.AddError(err) return } - d.addContainerMetrics(acc, cluster, m) + addContainerMetrics(acc, cluster, m) }(container.ID) } @@ -177,14 +177,14 @@ func (d *DCOS) gatherContainers(ctx context.Context, acc telegraf.Accumulator, c acc.AddError(err) return } - d.addAppMetrics(acc, cluster, m) + addAppMetrics(acc, cluster, m) }(container.ID) } } wg.Wait() } -func (d *DCOS) createPoints(m *metrics) []*point { +func createPoints(m *metrics) []*point { points := make(map[string]*point) for _, dp := range m.Datapoints { fieldKey := strings.ReplaceAll(dp.Name, ".", "_") @@ -244,10 +244,10 @@ func (d *DCOS) createPoints(m *metrics) []*point { return results } -func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *metrics, tagDimensions []string) { +func addMetrics(acc telegraf.Accumulator, cluster, mname string, m *metrics, tagDimensions []string) { tm := time.Now() - points := d.createPoints(m) + points := createPoints(m) for _, p := range points { tags := make(map[string]string) @@ -266,16 +266,16 @@ func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *me } } -func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { - d.addMetrics(acc, cluster, "dcos_node", m, nodeDimensions) +func addNodeMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { + addMetrics(acc, cluster, "dcos_node", m, nodeDimensions) } -func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { - d.addMetrics(acc, cluster, "dcos_container", m, containerDimensions) +func addContainerMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { + addMetrics(acc, cluster, "dcos_container", m, containerDimensions) } -func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { - d.addMetrics(acc, cluster, "dcos_app", m, appDimensions) +func addAppMetrics(acc telegraf.Accumulator, cluster string, m *metrics) { + addMetrics(acc, cluster, 
"dcos_app", m, appDimensions) } func (d *DCOS) initialize() error { diff --git a/plugins/inputs/dcos/dcos_test.go b/plugins/inputs/dcos/dcos_test.go index 4915b73c6f8d7..60e32e0f0f6a0 100644 --- a/plugins/inputs/dcos/dcos_test.go +++ b/plugins/inputs/dcos/dcos_test.go @@ -196,8 +196,7 @@ func TestAddNodeMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - dcos := &DCOS{} - dcos.addNodeMetrics(&acc, "a", tt.metrics) + addNodeMetrics(&acc, "a", tt.metrics) for i, ok := range tt.check(&acc) { require.Truef(t, ok, "Index was not true: %d", i) } @@ -267,8 +266,7 @@ func TestAddContainerMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - dcos := &DCOS{} - dcos.addContainerMetrics(&acc, "a", tt.metrics) + addContainerMetrics(&acc, "a", tt.metrics) for i, ok := range tt.check(&acc) { require.Truef(t, ok, "Index was not true: %d", i) } @@ -341,8 +339,7 @@ func TestAddAppMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - dcos := &DCOS{} - dcos.addAppMetrics(&acc, "a", tt.metrics) + addAppMetrics(&acc, "a", tt.metrics) for i, ok := range tt.check(&acc) { require.Truef(t, ok, "Index was not true: %d", i) } diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index abc0e489e01d5..8e6eb1ee0c85a 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -128,7 +128,7 @@ func (d *DockerLogs) Init() error { } // Start is a noop which is required for a *DockerLogs to implement the telegraf.ServiceInput interface -func (d *DockerLogs) Start(telegraf.Accumulator) error { +func (*DockerLogs) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go index 95dedd43bf42c..ff3b0b808ebf8 100644 --- 
a/plugins/inputs/docker_log/docker_log_test.go +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -40,7 +40,7 @@ type response struct { io.Reader } -func (r *response) Close() error { +func (*response) Close() error { return nil } diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index f1564327507b8..ea4864f04ca3b 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -56,7 +56,7 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(s string, f string) { defer wg.Done() - acc.AddError(d.gatherServer(s, acc, d.Type, f)) + acc.AddError(gatherServer(s, acc, d.Type, f)) }(server, filter) } } @@ -65,7 +65,7 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { return nil } -func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype, filter string) error { +func gatherServer(addr string, acc telegraf.Accumulator, qtype, filter string) error { var proto string if strings.HasPrefix(addr, "/") { diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index b537c1fdc09c4..712ca3d439df1 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -68,7 +68,7 @@ func (ecs *Ecs) Gather(acc telegraf.Accumulator) error { } // accumulate metrics - ecs.accTask(task, taskTags, acc) + accTask(task, taskTags, acc) ecs.accContainers(task, taskTags, acc) return nil @@ -137,7 +137,7 @@ func resolveEndpoint(ecs *Ecs) { ecs.metadataVersion = 2 } -func (ecs *Ecs) accTask(task *ecsTask, tags map[string]string, acc telegraf.Accumulator) { +func accTask(task *ecsTask, tags map[string]string, acc telegraf.Accumulator) { taskFields := map[string]interface{}{ "desired_status": task.DesiredStatus, "known_status": task.KnownStatus, diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 3cbc7fd2e3b48..b8f51dbc29502 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ 
b/plugins/inputs/elasticsearch/elasticsearch.go @@ -159,7 +159,7 @@ func (e *Elasticsearch) Init() error { return nil } -func (e *Elasticsearch) Start(_ telegraf.Accumulator) error { +func (*Elasticsearch) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go index 0e06b4b049489..525a9a061d9d7 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -89,7 +89,7 @@ func (e *ElasticsearchQuery) Init() error { return nil } -func (e *ElasticsearchQuery) Start(_ telegraf.Accumulator) error { +func (*ElasticsearchQuery) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index 4629a8500066b..0116fc3d94e9a 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -269,15 +269,15 @@ func (c *commandEthtool) init() error { return nil } -func (c *commandEthtool) driverName(intf namespacedInterface) (driver string, err error) { +func (*commandEthtool) driverName(intf namespacedInterface) (driver string, err error) { return intf.namespace.driverName(intf) } -func (c *commandEthtool) stats(intf namespacedInterface) (stats map[string]uint64, err error) { +func (*commandEthtool) stats(intf namespacedInterface) (stats map[string]uint64, err error) { return intf.namespace.stats(intf) } -func (c *commandEthtool) get(intf namespacedInterface) (stats map[string]uint64, err error) { +func (*commandEthtool) get(intf namespacedInterface) (stats map[string]uint64, err error) { return intf.namespace.get(intf) } diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index 0088a3f3c93de..64e0c848bf789 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -35,19 +35,19 @@ 
func (n *namespaceMock) name() string { return n.namespaceName } -func (n *namespaceMock) interfaces() ([]namespacedInterface, error) { +func (*namespaceMock) interfaces() ([]namespacedInterface, error) { return nil, errors.New("it is a test bug to invoke this function") } -func (n *namespaceMock) driverName(_ namespacedInterface) (string, error) { +func (*namespaceMock) driverName(_ namespacedInterface) (string, error) { return "", errors.New("it is a test bug to invoke this function") } -func (n *namespaceMock) stats(_ namespacedInterface) (map[string]uint64, error) { +func (*namespaceMock) stats(_ namespacedInterface) (map[string]uint64, error) { return nil, errors.New("it is a test bug to invoke this function") } -func (n *namespaceMock) get(_ namespacedInterface) (map[string]uint64, error) { +func (*namespaceMock) get(_ namespacedInterface) (map[string]uint64, error) { return nil, errors.New("it is a test bug to invoke this function") } @@ -55,7 +55,7 @@ type commandEthtoolMock struct { interfaceMap map[string]*interfaceMock } -func (c *commandEthtoolMock) init() error { +func (*commandEthtoolMock) init() error { // Not required for test mock return nil } diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index efe8c29687a29..66dc7eea0f87b 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -59,7 +59,7 @@ func (*Exec) SampleConfig() string { return sampleConfig } -func (e *Exec) Init() error { +func (*Exec) Init() error { return nil } @@ -121,7 +121,7 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error { return nil } -func (c commandRunner) truncate(buf bytes.Buffer) bytes.Buffer { +func truncate(buf bytes.Buffer) bytes.Buffer { // Limit the number of bytes. 
didTruncate := false if buf.Len() > maxStderrBytes { diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index def86cb1c9897..eb605f8fd0cde 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -302,10 +302,9 @@ func TestTruncate(t *testing.T) { }, } - c := commandRunner{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res := c.truncate(*tt.bufF()) + res := truncate(*tt.bufF()) require.Equal(t, tt.expF().Bytes(), res.Bytes()) }) } diff --git a/plugins/inputs/exec/run_notwinodws.go b/plugins/inputs/exec/run_notwindows.go similarity index 96% rename from plugins/inputs/exec/run_notwinodws.go rename to plugins/inputs/exec/run_notwindows.go index 0fdaf2e73eb37..fa346e590bf0e 100644 --- a/plugins/inputs/exec/run_notwinodws.go +++ b/plugins/inputs/exec/run_notwindows.go @@ -44,7 +44,7 @@ func (c commandRunner) run( out = removeWindowsCarriageReturns(out) if stderr.Len() > 0 && !c.debug { stderr = removeWindowsCarriageReturns(stderr) - stderr = c.truncate(stderr) + stderr = truncate(stderr) } return out.Bytes(), stderr.Bytes(), runErr diff --git a/plugins/inputs/exec/run_windows.go b/plugins/inputs/exec/run_windows.go index fad0160b3119a..f7acc7c5fb712 100644 --- a/plugins/inputs/exec/run_windows.go +++ b/plugins/inputs/exec/run_windows.go @@ -46,7 +46,7 @@ func (c commandRunner) run( out = removeWindowsCarriageReturns(out) if stderr.Len() > 0 && !c.debug { stderr = removeWindowsCarriageReturns(stderr) - stderr = c.truncate(stderr) + stderr = truncate(stderr) } return out.Bytes(), stderr.Bytes(), runErr diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index 6368e2d21746f..d4dfcf00c232f 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -362,7 +362,7 @@ func readChanWithTimeout(t *testing.T, metrics chan telegraf.Metric, timeout tim type TestMetricMaker struct{} -func (tm *TestMetricMaker) Name() string { +func 
(*TestMetricMaker) Name() string { return "TestPlugin" } @@ -370,11 +370,11 @@ func (tm *TestMetricMaker) LogName() string { return tm.Name() } -func (tm *TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric { +func (*TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric { return aMetric } -func (tm *TestMetricMaker) Log() telegraf.Logger { +func (*TestMetricMaker) Log() telegraf.Logger { return logger.New("TestPlugin", "test", "") } diff --git a/plugins/inputs/execd/shim/input.go b/plugins/inputs/execd/shim/input.go index cf100256fe0b3..0b4ddf30891fc 100644 --- a/plugins/inputs/execd/shim/input.go +++ b/plugins/inputs/execd/shim/input.go @@ -8,16 +8,16 @@ type inputShim struct { } // LogName satisfies the MetricMaker interface -func (i inputShim) LogName() string { +func (inputShim) LogName() string { return "" } // MakeMetric satisfies the MetricMaker interface -func (i inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric { +func (inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric { return m // don't need to do anything to it. 
} // Log satisfies the MetricMaker interface -func (i inputShim) Log() telegraf.Logger { +func (inputShim) Log() telegraf.Logger { return nil } diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index 63e073e5498fc..e3124ea74b353 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -85,11 +85,7 @@ type testInput struct { metricProcessed chan bool } -func (i *testInput) SampleConfig() string { - return "" -} - -func (i *testInput) Description() string { +func (*testInput) SampleConfig() string { return "" } @@ -105,11 +101,11 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *testInput) Start(_ telegraf.Accumulator) error { +func (*testInput) Start(telegraf.Accumulator) error { return nil } -func (i *testInput) Stop() { +func (*testInput) Stop() { } func TestLoadConfig(t *testing.T) { @@ -137,15 +133,11 @@ type serviceInput struct { SecretValue string `toml:"secret_value"` } -func (i *serviceInput) SampleConfig() string { - return "" -} - -func (i *serviceInput) Description() string { +func (*serviceInput) SampleConfig() string { return "" } -func (i *serviceInput) Gather(acc telegraf.Accumulator) error { +func (*serviceInput) Gather(acc telegraf.Accumulator) error { acc.AddFields("measurement", map[string]interface{}{ "field": 1, @@ -157,11 +149,11 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *serviceInput) Start(_ telegraf.Accumulator) error { +func (*serviceInput) Start(telegraf.Accumulator) error { return nil } -func (i *serviceInput) Stop() { +func (*serviceInput) Stop() { } // we can get stuck if stdout gets clogged up and nobody's reading from it. 
From 678d1f61b4826ed646f9afc5c27b0d484a34d56e Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Mon, 16 Dec 2024 10:15:24 +0800 Subject: [PATCH 155/170] feat(outputs): Fix go mod error --- go.sum | 8 -------- 1 file changed, 8 deletions(-) diff --git a/go.sum b/go.sum index 5c3e20e416695..4361d7679c2f9 100644 --- a/go.sum +++ b/go.sum @@ -2561,8 +2561,6 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2906,8 +2904,6 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2928,8 +2924,6 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term 
v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2952,8 +2946,6 @@ golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 55fa1d80f85c5990ff5d27902ad65c5dc72fb455 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Mon, 16 Dec 2024 14:17:45 +0800 Subject: [PATCH 156/170] Update inlong_test.go --- plugins/outputs/inlong/inlong_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index be0bef2462c70..ef85b5d0a0b75 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -52,6 +52,7 @@ func TestInlong_Write(t *testing.T) { producer: producer, serializer: s, } + m := metric.New( 
"cpu", map[string]string{ From c2171f28295e27f82f71c9d0e3bd2ddbf55eb403 Mon Sep 17 00:00:00 2001 From: justinwwhuang Date: Mon, 16 Dec 2024 14:21:35 +0800 Subject: [PATCH 157/170] Update inlong_test.go --- plugins/outputs/inlong/inlong_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/outputs/inlong/inlong_test.go b/plugins/outputs/inlong/inlong_test.go index ef85b5d0a0b75..be0bef2462c70 100644 --- a/plugins/outputs/inlong/inlong_test.go +++ b/plugins/outputs/inlong/inlong_test.go @@ -52,7 +52,6 @@ func TestInlong_Write(t *testing.T) { producer: producer, serializer: s, } - m := metric.New( "cpu", map[string]string{ From b079c2809e2e750c2fffc35c06b0bee48c8a0c57 Mon Sep 17 00:00:00 2001 From: Sergio <239811+zomfg@users.noreply.github.com> Date: Mon, 16 Dec 2024 17:06:22 +0100 Subject: [PATCH 158/170] feat(inputs.docker): Support swarm jobs (#16292) --- plugins/inputs/docker/docker.go | 12 +++++++++++ plugins/inputs/docker/docker_test.go | 26 ++++++++++++++++++++++++ plugins/inputs/docker/docker_testdata.go | 25 +++++++++++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 0f5471eb11840..942c79d7dd8ce 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -309,6 +309,18 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { tags["service_mode"] = "global" fields["tasks_running"] = running[service.ID] fields["tasks_desired"] = tasksNoShutdown[service.ID] + } else if service.Spec.Mode.ReplicatedJob != nil { + tags["service_mode"] = "replicated_job" + fields["tasks_running"] = running[service.ID] + if service.Spec.Mode.ReplicatedJob.MaxConcurrent != nil { + fields["max_concurrent"] = *service.Spec.Mode.ReplicatedJob.MaxConcurrent + } + if service.Spec.Mode.ReplicatedJob.TotalCompletions != nil { + fields["total_completions"] = *service.Spec.Mode.ReplicatedJob.TotalCompletions + } + } else if service.Spec.Mode.GlobalJob != nil { + 
tags["service_mode"] = "global_job" + fields["tasks_running"] = running[service.ID] } else { d.Log.Error("Unknown replica mode") } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 600227e5b1653..24d2c59469267 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -1102,6 +1102,32 @@ func TestDockerGatherSwarmInfo(t *testing.T) { "service_mode": "global", }, ) + + acc.AssertContainsTaggedFields(t, + "docker_swarm", + map[string]interface{}{ + "tasks_running": int(0), + "max_concurrent": uint64(2), + "total_completions": uint64(2), + }, + map[string]string{ + "service_id": "rfmqydhe8cluzl9hayyrhw5ga", + "service_name": "test3", + "service_mode": "replicated_job", + }, + ) + + acc.AssertContainsTaggedFields(t, + "docker_swarm", + map[string]interface{}{ + "tasks_running": int(0), + }, + map[string]string{ + "service_id": "mp50lo68vqgkory4e26ts8f9d", + "service_name": "test4", + "service_mode": "global_job", + }, + ) } func TestContainerStateFilter(t *testing.T) { diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index 57be5a8cb1773..e0b5cb6f6cda0 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -196,6 +196,31 @@ var serviceList = []swarm.Service{ }, }, }, + { + ID: "rfmqydhe8cluzl9hayyrhw5ga", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "test3", + }, + Mode: swarm.ServiceMode{ + ReplicatedJob: &swarm.ReplicatedJob{ + MaxConcurrent: &two, + TotalCompletions: &two, + }, + }, + }, + }, + { + ID: "mp50lo68vqgkory4e26ts8f9d", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "test4", + }, + Mode: swarm.ServiceMode{ + GlobalJob: &swarm.GlobalJob{}, + }, + }, + }, } var taskList = []swarm.Task{ From d45fb0a437845e43862e71aec0d905da3bd7b6e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 17 Dec 2024 15:33:30 +0100 
Subject: [PATCH 159/170] chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[f-k]` (#16308) --- .../filesystem_helpers_notwindows.go | 2 +- plugins/inputs/fireboard/fireboard.go | 4 +-- plugins/inputs/gnmi/gnmi.go | 2 +- plugins/inputs/gnmi/gnmi_test.go | 6 ++--- plugins/inputs/gnmi/tag_store.go | 6 ++--- .../google_cloud_storage.go | 2 +- plugins/inputs/graylog/graylog_test.go | 4 +-- plugins/inputs/haproxy/haproxy_test.go | 10 +++----- plugins/inputs/hddtemp/go-hddtemp/hddtemp.go | 2 +- plugins/inputs/hddtemp/hddtemp_test.go | 2 +- plugins/inputs/http/http.go | 2 +- .../http_listener_v2/http_listener_v2.go | 2 +- plugins/inputs/hugepages/hugepages.go | 6 ++--- plugins/inputs/icinga2/icinga2.go | 16 ++++++------ plugins/inputs/infiniband/infiniband_linux.go | 2 +- .../influxdb_listener/influxdb_listener.go | 2 +- .../influxdb_v2_listener.go | 2 +- .../inputs/intel_baseband/intel_baseband.go | 2 +- .../inputs/intel_baseband/log_connector.go | 8 +++--- .../intel_baseband/log_connector_test.go | 4 +-- plugins/inputs/intel_dlb/intel_dlb.go | 2 +- plugins/inputs/intel_pmt/intel_pmt.go | 2 +- plugins/inputs/intel_pmu/intel_pmu_test.go | 12 ++++----- plugins/inputs/intel_powerstat/options.go | 2 +- plugins/inputs/intel_rdt/intel_rdt.go | 2 +- plugins/inputs/intel_rdt/intel_rdt_test.go | 2 +- plugins/inputs/intel_rdt/processes.go | 2 +- plugins/inputs/ipmi_sensor/ipmi_sensor.go | 4 +-- .../inputs/ipmi_sensor/ipmi_sensor_test.go | 6 +---- plugins/inputs/ipset/ipset.go | 2 +- .../jti_openconfig_telemetry.go | 2 +- .../jti_openconfig_telemetry_test.go | 25 ++++++++----------- .../inputs/kafka_consumer/kafka_consumer.go | 2 +- .../kafka_consumer/kafka_consumer_test.go | 22 ++++++++-------- plugins/inputs/kernel/kernel.go | 14 +++++------ plugins/inputs/kernel/kernel_test.go | 14 ++--------- plugins/inputs/kibana/kibana.go | 2 +- plugins/inputs/kube_inventory/certificate.go | 4 +-- plugins/inputs/kube_inventory/endpoint.go | 4 +-- 
.../inputs/kube_inventory/endpoint_test.go | 7 +----- plugins/inputs/kube_inventory/ingress.go | 4 +-- plugins/inputs/kube_inventory/ingress_test.go | 7 +----- plugins/inputs/kube_inventory/node.go | 4 +-- plugins/inputs/kube_inventory/node_test.go | 2 +- .../inputs/kube_inventory/persistentvolume.go | 4 +-- .../kube_inventory/persistentvolume_test.go | 6 +---- 46 files changed, 104 insertions(+), 141 deletions(-) diff --git a/plugins/inputs/filecount/filesystem_helpers_notwindows.go b/plugins/inputs/filecount/filesystem_helpers_notwindows.go index e1a11c78ec2a4..7f4c6e2ced1e8 100644 --- a/plugins/inputs/filecount/filesystem_helpers_notwindows.go +++ b/plugins/inputs/filecount/filesystem_helpers_notwindows.go @@ -40,7 +40,7 @@ func (f fakeFileInfo) ModTime() time.Time { return f.modtime } func (f fakeFileInfo) IsDir() bool { return f.isdir } func (f fakeFileInfo) Sys() interface{} { return f.sys } -func (f fakeFileSystem) open(name string) (file, error) { +func (fakeFileSystem) open(name string) (file, error) { return nil, &os.PathError{Op: "Open", Path: name, Err: errors.New("not implemented by fake filesystem")} } diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go index bf45fbe53172c..07398b66f9281 100644 --- a/plugins/inputs/fireboard/fireboard.go +++ b/plugins/inputs/fireboard/fireboard.go @@ -87,7 +87,7 @@ func (r *Fireboard) Gather(acc telegraf.Accumulator) error { } // Range over all devices, gathering stats. Returns early in case of any error. 
for _, s := range stats { - r.gatherTemps(s, acc) + gatherTemps(s, acc) } return nil } @@ -105,7 +105,7 @@ func scale(n int) string { } // Gathers stats from a single device, adding them to the accumulator -func (r *Fireboard) gatherTemps(s fireboardStats, acc telegraf.Accumulator) { +func gatherTemps(s fireboardStats, acc telegraf.Accumulator) { // Construct lookup for scale values for _, t := range s.LatestTemps { diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 3832669b8110d..dcf7101ff10e0 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -314,7 +314,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { return nil } -func (c *GNMI) Gather(_ telegraf.Accumulator) error { +func (*GNMI) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index 4f0feae0fcfc1..f9af63369027a 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -51,15 +51,15 @@ type mockServer struct { grpcServer *grpc.Server } -func (s *mockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { +func (*mockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { return nil, nil } -func (s *mockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { +func (*mockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { return nil, nil } -func (s *mockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { +func (*mockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { return nil, nil } diff --git a/plugins/inputs/gnmi/tag_store.go b/plugins/inputs/gnmi/tag_store.go index af6b2b55f2bcf..1ab48bfd50c10 100644 --- a/plugins/inputs/gnmi/tag_store.go +++ b/plugins/inputs/gnmi/tag_store.go @@ -89,7 +89,7 @@ func (s *tagStore) insert(subscription tagSubscription, path 
*pathInfo, values [ } } case "elements": - key, match := s.getElementsKeys(path, subscription.Elements) + key, match := getElementsKeys(path, subscription.Elements) if !match || len(values) == 0 { return nil } @@ -141,7 +141,7 @@ func (s *tagStore) lookup(path *pathInfo, metricTags map[string]string) map[stri // Match elements for _, requiredKeys := range s.elements.required { - key, match := s.getElementsKeys(path, requiredKeys) + key, match := getElementsKeys(path, requiredKeys) if !match { continue } @@ -153,7 +153,7 @@ func (s *tagStore) lookup(path *pathInfo, metricTags map[string]string) map[stri return tags } -func (s *tagStore) getElementsKeys(path *pathInfo, elements []string) (string, bool) { +func getElementsKeys(path *pathInfo, elements []string) (string, bool) { // Search for the required path elements and collect a ordered // list of their values to in the form // elementName1={keyA=valueA,keyB=valueB,...},...,elementNameN={keyY=valueY,keyZ=valueZ} diff --git a/plugins/inputs/google_cloud_storage/google_cloud_storage.go b/plugins/inputs/google_cloud_storage/google_cloud_storage.go index 920e66b8e0642..2c45552aeb1da 100644 --- a/plugins/inputs/google_cloud_storage/google_cloud_storage.go +++ b/plugins/inputs/google_cloud_storage/google_cloud_storage.go @@ -57,7 +57,7 @@ func (gcs *GCS) Init() error { return gcs.setOffset() } -func (gcs *GCS) SampleConfig() string { +func (*GCS) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index 0662dc058566b..ecbb40b03cc3a 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -119,10 +119,10 @@ func (c *mockHTTPClient) makeRequest(req *http.Request) (*http.Response, error) return &resp, nil } -func (c *mockHTTPClient) setHTTPClient(_ *http.Client) { +func (*mockHTTPClient) setHTTPClient(*http.Client) { } -func (c *mockHTTPClient) httpClient() *http.Client { +func (*mockHTTPClient) 
httpClient() *http.Client { return nil } diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index 884dcdc8dc76f..ec34e817bb87f 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -17,9 +17,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -type statServer struct{} - -func (s statServer) serverSocket(l net.Listener) { +func serverSocket(l net.Listener) { for { conn, err := l.Accept() if err != nil { @@ -151,8 +149,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { sockets[i] = sock defer sock.Close() //nolint:revive,gocritic // done on purpose, closing will be executed properly - s := statServer{} - go s.serverSocket(sock) + go serverSocket(sock) } r := &HAProxy{ @@ -191,8 +188,7 @@ func TestHaproxyGeneratesMetricsUsingTcp(t *testing.T) { } defer l.Close() - s := statServer{} - go s.serverSocket(l) + go serverSocket(l) r := &HAProxy{ Servers: []string{"tcp://" + l.Addr().String()}, diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go index 7c58cfbea321b..1e511b5e9bb28 100644 --- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go @@ -25,7 +25,7 @@ func New() *hddtemp { } // Fetch gathers disks data from hddtemp daemon. 
-func (h *hddtemp) Fetch(address string) ([]Disk, error) { +func (*hddtemp) Fetch(address string) ([]Disk, error) { var ( err error conn net.Conn diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index b266600a95682..f1dd99cf8df5f 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -12,7 +12,7 @@ import ( type mockFetcher struct { } -func (h *mockFetcher) Fetch(_ string) ([]hddtemp.Disk, error) { +func (*mockFetcher) Fetch(string) ([]hddtemp.Disk, error) { return []hddtemp.Disk{ { DeviceName: "Disk1", diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 5cf8eb5919af1..5557d5bd108d5 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -86,7 +86,7 @@ func (h *HTTP) SetParserFunc(fn telegraf.ParserFunc) { h.parserFunc = fn } -func (h *HTTP) Start(_ telegraf.Accumulator) error { +func (*HTTP) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 825da44535801..e940620fb2eed 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -197,7 +197,7 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { return nil } -func (h *HTTPListenerV2) Gather(_ telegraf.Accumulator) error { +func (*HTTPListenerV2) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/hugepages/hugepages.go b/plugins/inputs/hugepages/hugepages.go index fb7a719179ef1..ebd31845816aa 100644 --- a/plugins/inputs/hugepages/hugepages.go +++ b/plugins/inputs/hugepages/hugepages.go @@ -118,7 +118,7 @@ func (h *Hugepages) Gather(acc telegraf.Accumulator) error { // gatherStatsPerNode collects root hugepages statistics func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error { - return h.gatherFromHugepagePath(acc, "hugepages_"+rootHugepages, 
h.rootHugepagePath, hugepagesMetricsRoot, nil) + return gatherFromHugepagePath(acc, "hugepages_"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil) } // gatherStatsPerNode collects hugepages statistics per NUMA node @@ -144,7 +144,7 @@ func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error { "node": nodeNumber, } hugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), "hugepages") - err = h.gatherFromHugepagePath(acc, "hugepages_"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags) + err = gatherFromHugepagePath(acc, "hugepages_"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags) if err != nil { return err } @@ -152,7 +152,7 @@ func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error { return nil } -func (h *Hugepages) gatherFromHugepagePath(acc telegraf.Accumulator, measurement, path string, fileFilter, defaultTags map[string]string) error { +func gatherFromHugepagePath(acc telegraf.Accumulator, measurement, path string, fileFilter, defaultTags map[string]string) error { // read metrics from: hugepages/hugepages-*/* hugepagesDirs, err := os.ReadDir(path) if err != nil { diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 7f85b3c0f01b9..a0aa1bca47a6b 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -121,7 +121,7 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { } result := resultObject{} - err = i.parseObjectResponse(resp, &result) + err = parseObjectResponse(resp, &result) if err != nil { return fmt.Errorf("could not parse object response: %w", err) } @@ -145,13 +145,13 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { switch statusType { case "ApiListener": - fields, err = i.parsePerfdataResponse(resp) + fields, err = parsePerfdataResponse(resp) case "CIB": - fields, err = i.parseCIBResponse(resp) + fields, err = parseCIBResponse(resp) case "IdoMysqlConnection": - 
fields, err = i.parsePerfdataResponse(resp) + fields, err = parsePerfdataResponse(resp) case "IdoPgsqlConnection": - fields, err = i.parsePerfdataResponse(resp) + fields, err = parsePerfdataResponse(resp) } if err != nil { @@ -233,7 +233,7 @@ func (i *Icinga2) icingaRequest(address string) (*http.Response, error) { return resp, nil } -func (i *Icinga2) parseObjectResponse(resp *http.Response, result *resultObject) error { +func parseObjectResponse(resp *http.Response, result *resultObject) error { err := json.NewDecoder(resp.Body).Decode(&result) if err != nil { return err @@ -246,7 +246,7 @@ func (i *Icinga2) parseObjectResponse(resp *http.Response, result *resultObject) return nil } -func (i *Icinga2) parseCIBResponse(resp *http.Response) (map[string]interface{}, error) { +func parseCIBResponse(resp *http.Response) (map[string]interface{}, error) { result := resultCIB{} err := json.NewDecoder(resp.Body).Decode(&result) @@ -262,7 +262,7 @@ func (i *Icinga2) parseCIBResponse(resp *http.Response) (map[string]interface{}, return result.Results[0].Status, nil } -func (i *Icinga2) parsePerfdataResponse(resp *http.Response) (map[string]interface{}, error) { +func parsePerfdataResponse(resp *http.Response) (map[string]interface{}, error) { result := resultPerfdata{} err := json.NewDecoder(resp.Body).Decode(&result) diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go index 72bbc4714d763..214ba8e08fd67 100644 --- a/plugins/inputs/infiniband/infiniband_linux.go +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -12,7 +12,7 @@ import ( ) // Gather statistics from our infiniband cards -func (i *Infiniband) Gather(acc telegraf.Accumulator) error { +func (*Infiniband) Gather(acc telegraf.Accumulator) error { rdmaDevices := rdmamap.GetRdmaDeviceList() if len(rdmaDevices) == 0 { diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 
9186ecac8e54a..044b4c1050d26 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -76,7 +76,7 @@ func (*InfluxDBListener) SampleConfig() string { return sampleConfig } -func (h *InfluxDBListener) Gather(_ telegraf.Accumulator) error { +func (*InfluxDBListener) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index fb8a03de83949..52854d6f6e4d6 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -120,7 +120,7 @@ func (h *InfluxDBV2Listener) Init() error { return nil } -func (h *InfluxDBV2Listener) Gather(_ telegraf.Accumulator) error { +func (*InfluxDBV2Listener) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/intel_baseband/intel_baseband.go b/plugins/inputs/intel_baseband/intel_baseband.go index 4017cb74b8946..ae22b7acee11b 100644 --- a/plugins/inputs/intel_baseband/intel_baseband.go +++ b/plugins/inputs/intel_baseband/intel_baseband.go @@ -56,7 +56,7 @@ type Baseband struct { sockConn *socketConnector } -func (b *Baseband) SampleConfig() string { +func (*Baseband) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/intel_baseband/log_connector.go b/plugins/inputs/intel_baseband/log_connector.go index cfb67b57c1947..82c5367b5f1b1 100644 --- a/plugins/inputs/intel_baseband/log_connector.go +++ b/plugins/inputs/intel_baseband/log_connector.go @@ -135,7 +135,7 @@ func (lc *logConnector) readNumVFs() error { continue } - numVFs, err := lc.parseNumVFs(line) + numVFs, err := parseNumVFs(line) if err != nil { lc.numVFs = -1 return err @@ -189,7 +189,7 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric, return offsetLine, nil, err } - operationName := lc.parseOperationName(line) + 
operationName := parseOperationName(line) if len(operationName) == 0 { return offsetLine, nil, errors.New("valid operation name wasn't found in log") } @@ -221,7 +221,7 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric, } // Example value = Thu Apr 13 13:28:40 2023:INFO:Device Status:: 2 VFs -func (lc *logConnector) parseNumVFs(s string) (int, error) { +func parseNumVFs(s string) (int, error) { i := strings.LastIndex(s, deviceStatusStartPrefix) if i == -1 { return 0, errors.New("couldn't find device status prefix in line") @@ -244,7 +244,7 @@ func (lc *logConnector) parseNumVFs(s string) (int, error) { // Parse Operation name // Example = Thu Apr 13 13:28:40 2023:INFO:5GUL counters: Code Blocks // Output: 5GUL -func (lc *logConnector) parseOperationName(s string) string { +func parseOperationName(s string) string { i := strings.Index(s, infoLine) if i >= 0 { j := strings.Index(s[i:], countersLine) diff --git a/plugins/inputs/intel_baseband/log_connector_test.go b/plugins/inputs/intel_baseband/log_connector_test.go index 9d07e93ff754b..b2b286dab6d26 100644 --- a/plugins/inputs/intel_baseband/log_connector_test.go +++ b/plugins/inputs/intel_baseband/log_connector_test.go @@ -240,11 +240,9 @@ func TestParseOperationName(t *testing.T) { {"", ""}, } - logConnector := prepareLogConnMock() - require.NotNil(t, logConnector) for _, tc := range testCases { t.Run("expected "+tc.expected, func(t *testing.T) { - operationName := logConnector.parseOperationName(tc.input) + operationName := parseOperationName(tc.input) require.Equal(t, tc.expected, operationName) }) } diff --git a/plugins/inputs/intel_dlb/intel_dlb.go b/plugins/inputs/intel_dlb/intel_dlb.go index 643713ce1cba8..ddbe40c1adf58 100644 --- a/plugins/inputs/intel_dlb/intel_dlb.go +++ b/plugins/inputs/intel_dlb/intel_dlb.go @@ -50,7 +50,7 @@ type IntelDLB struct { maxInitMessageLength uint32 } -func (d *IntelDLB) SampleConfig() string { +func (*IntelDLB) SampleConfig() string { return 
sampleConfig } diff --git a/plugins/inputs/intel_pmt/intel_pmt.go b/plugins/inputs/intel_pmt/intel_pmt.go index f61980b7626e4..54e91613caefa 100644 --- a/plugins/inputs/intel_pmt/intel_pmt.go +++ b/plugins/inputs/intel_pmt/intel_pmt.go @@ -56,7 +56,7 @@ type fileInfo struct { pciBdf string // PCI Bus:Device.Function (BDF) } -func (p *IntelPMT) SampleConfig() string { +func (*IntelPMT) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/intel_pmu/intel_pmu_test.go b/plugins/inputs/intel_pmu/intel_pmu_test.go index 6c75f68f68378..2910c905ad100 100644 --- a/plugins/inputs/intel_pmu/intel_pmu_test.go +++ b/plugins/inputs/intel_pmu/intel_pmu_test.go @@ -547,9 +547,9 @@ type fakeFileInfo struct { fileMode os.FileMode } -func (f fakeFileInfo) Name() string { return "" } -func (f fakeFileInfo) Size() int64 { return 0 } -func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode } -func (f fakeFileInfo) ModTime() time.Time { return time.Time{} } -func (f fakeFileInfo) IsDir() bool { return false } -func (f fakeFileInfo) Sys() interface{} { return nil } +func (fakeFileInfo) Name() string { return "" } +func (fakeFileInfo) Size() int64 { return 0 } +func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode } +func (fakeFileInfo) ModTime() time.Time { return time.Time{} } +func (fakeFileInfo) IsDir() bool { return false } +func (fakeFileInfo) Sys() interface{} { return nil } diff --git a/plugins/inputs/intel_powerstat/options.go b/plugins/inputs/intel_powerstat/options.go index 7e422b26bc01f..5e4fd4df4ad72 100644 --- a/plugins/inputs/intel_powerstat/options.go +++ b/plugins/inputs/intel_powerstat/options.go @@ -33,7 +33,7 @@ type optGenerator struct{} // generate takes plugin configuration options and generates options needed // to gather requested metrics. 
-func (g *optGenerator) generate(cfg optConfig) []ptel.Option { +func (*optGenerator) generate(cfg optConfig) []ptel.Option { opts := make([]ptel.Option, 0) if len(cfg.includedCPUs) != 0 { opts = append(opts, ptel.WithIncludedCPUs(cfg.includedCPUs)) diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 1c0685e634bf7..d1427fbdb4f00 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -100,7 +100,7 @@ func (r *IntelRDT) Start(acc telegraf.Accumulator) error { return nil } -func (r *IntelRDT) Gather(_ telegraf.Accumulator) error { +func (*IntelRDT) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index e9468521276fb..7f4dc00919695 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -12,7 +12,7 @@ import ( type mockProc struct{} -func (m *mockProc) getAllProcesses() ([]process, error) { +func (*mockProc) getAllProcesses() ([]process, error) { procs := []process{ {Name: "process", PID: 1000}, {Name: "process2", PID: 1002}, diff --git a/plugins/inputs/intel_rdt/processes.go b/plugins/inputs/intel_rdt/processes.go index 63c8622aa1875..975760b0bfe40 100644 --- a/plugins/inputs/intel_rdt/processes.go +++ b/plugins/inputs/intel_rdt/processes.go @@ -19,7 +19,7 @@ func newProcessor() processesHandler { return &processManager{} } -func (p *processManager) getAllProcesses() ([]process, error) { +func (*processManager) getAllProcesses() ([]process, error) { allProcesses, err := procfs.AllProcs() if err != nil { return nil, err diff --git a/plugins/inputs/ipmi_sensor/ipmi_sensor.go b/plugins/inputs/ipmi_sensor/ipmi_sensor.go index b70de8d0e3c2b..403e7b3fe7a59 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_sensor.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor.go @@ -179,7 +179,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server, 
sensor string) error { return m.parseV1(acc, hostname, out, timestamp) } case "chassis_power_status": - return m.parseChassisPowerStatus(acc, hostname, out, timestamp) + return parseChassisPowerStatus(acc, hostname, out, timestamp) case "dcmi_power_reading": return m.parseDCMIPowerReading(acc, hostname, out, timestamp) } @@ -187,7 +187,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server, sensor string) error { return fmt.Errorf("unknown sensor type %q", sensor) } -func (m *Ipmi) parseChassisPowerStatus(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { +func parseChassisPowerStatus(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // Chassis Power is on // Chassis Power is off diff --git a/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go index 74dab5eb18b8f..03c0c316d28a4 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go @@ -820,14 +820,10 @@ func Test_parsePowerStatus(t *testing.T) { }, } - ipmi := &Ipmi{ - Log: testutil.Logger{}, - } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - err := ipmi.parseChassisPowerStatus(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt) + err := parseChassisPowerStatus(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt) require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) diff --git a/plugins/inputs/ipset/ipset.go b/plugins/inputs/ipset/ipset.go index 4a7e0938a31ff..be177d10a26ac 100644 --- a/plugins/inputs/ipset/ipset.go +++ b/plugins/inputs/ipset/ipset.go @@ -38,7 +38,7 @@ func (*Ipset) SampleConfig() string { return sampleConfig } -func (i *Ipset) Init() error { +func (*Ipset) Init() error { _, err := exec.LookPath("ipset") if err != nil { return err diff --git 
a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go index e60dda37245bb..b6c7d039515fa 100644 --- a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go @@ -172,7 +172,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { return nil } -func (m *OpenConfigTelemetry) Gather(_ telegraf.Accumulator) error { +func (*OpenConfigTelemetry) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go index 758beb1ff9eef..b98f9100ae92f 100644 --- a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go @@ -58,10 +58,7 @@ type openConfigTelemetryServer struct { telemetry.UnimplementedOpenConfigTelemetryServer } -func (s *openConfigTelemetryServer) TelemetrySubscribe( - req *telemetry.SubscriptionRequest, - stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer, -) error { +func (*openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error { path := req.PathList[0].Path switch path { case "/sensor": @@ -78,28 +75,28 @@ func (s *openConfigTelemetryServer) TelemetrySubscribe( return nil } -func (s *openConfigTelemetryServer) CancelTelemetrySubscription( - _ context.Context, - _ *telemetry.CancelSubscriptionRequest, +func (*openConfigTelemetryServer) CancelTelemetrySubscription( + context.Context, + *telemetry.CancelSubscriptionRequest, ) (*telemetry.CancelSubscriptionReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetrySubscriptions( - _ context.Context, - _ *telemetry.GetSubscriptionsRequest, +func 
(*openConfigTelemetryServer) GetTelemetrySubscriptions( + context.Context, + *telemetry.GetSubscriptionsRequest, ) (*telemetry.GetSubscriptionsReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetryOperationalState( - _ context.Context, - _ *telemetry.GetOperationalStateRequest, +func (*openConfigTelemetryServer) GetTelemetryOperationalState( + context.Context, + *telemetry.GetOperationalStateRequest, ) (*telemetry.GetOperationalStateReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetDataEncodings(_ context.Context, _ *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { +func (*openConfigTelemetryServer) GetDataEncodings(context.Context, *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { return nil, nil } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 104ba26156ff6..ac335eec0810f 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -299,7 +299,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (k *KafkaConsumer) Gather(_ telegraf.Accumulator) error { +func (*KafkaConsumer) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 568da16b6a095..94bc4096617f5 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -240,53 +240,53 @@ type FakeConsumerGroupSession struct { ctx context.Context } -func (s *FakeConsumerGroupSession) Claims() map[string][]int32 { +func (*FakeConsumerGroupSession) Claims() map[string][]int32 { panic("not implemented") } -func (s *FakeConsumerGroupSession) MemberID() string { +func (*FakeConsumerGroupSession) MemberID() string { panic("not implemented") } -func (s *FakeConsumerGroupSession) 
GenerationID() int32 { +func (*FakeConsumerGroupSession) GenerationID() int32 { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkOffset(_ string, _ int32, _ int64, _ string) { +func (*FakeConsumerGroupSession) MarkOffset(string, int32, int64, string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) ResetOffset(_ string, _ int32, _ int64, _ string) { +func (*FakeConsumerGroupSession) ResetOffset(string, int32, int64, string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkMessage(_ *sarama.ConsumerMessage, _ string) { +func (*FakeConsumerGroupSession) MarkMessage(*sarama.ConsumerMessage, string) { } func (s *FakeConsumerGroupSession) Context() context.Context { return s.ctx } -func (s *FakeConsumerGroupSession) Commit() { +func (*FakeConsumerGroupSession) Commit() { } type FakeConsumerGroupClaim struct { messages chan *sarama.ConsumerMessage } -func (c *FakeConsumerGroupClaim) Topic() string { +func (*FakeConsumerGroupClaim) Topic() string { panic("not implemented") } -func (c *FakeConsumerGroupClaim) Partition() int32 { +func (*FakeConsumerGroupClaim) Partition() int32 { panic("not implemented") } -func (c *FakeConsumerGroupClaim) InitialOffset() int64 { +func (*FakeConsumerGroupClaim) InitialOffset() int64 { panic("not implemented") } -func (c *FakeConsumerGroupClaim) HighWaterMarkOffset() int64 { +func (*FakeConsumerGroupClaim) HighWaterMarkOffset() int64 { panic("not implemented") } diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 88c18c2101c45..7ddf0d714762e 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -68,12 +68,12 @@ func (k *Kernel) Init() error { } func (k *Kernel) Gather(acc telegraf.Accumulator) error { - data, err := k.getProcValueBytes(k.statFile) + data, err := getProcValueBytes(k.statFile) if err != nil { return err } - entropyValue, err := k.getProcValueInt(k.entropyStatFile) + entropyValue, err := 
getProcValueInt(k.entropyStatFile) if err != nil { return err } @@ -137,7 +137,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { extraStats := []string{"general_profit"} for _, f := range stats { - m, err := k.getProcValueInt(filepath.Join(k.ksmStatsDir, f)) + m, err := getProcValueInt(filepath.Join(k.ksmStatsDir, f)) if err != nil { return err } @@ -146,7 +146,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { } for _, f := range extraStats { - m, err := k.getProcValueInt(filepath.Join(k.ksmStatsDir, f)) + m, err := getProcValueInt(filepath.Join(k.ksmStatsDir, f)) if err != nil { // if an extraStats metric doesn't exist in our kernel version, ignore it. continue @@ -166,7 +166,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Kernel) getProcValueBytes(path string) ([]byte, error) { +func getProcValueBytes(path string) ([]byte, error) { if _, err := os.Stat(path); os.IsNotExist(err) { return nil, fmt.Errorf("path %q does not exist", path) } else if err != nil { @@ -181,8 +181,8 @@ func (k *Kernel) getProcValueBytes(path string) ([]byte, error) { return data, nil } -func (k *Kernel) getProcValueInt(path string) (int64, error) { - data, err := k.getProcValueBytes(path) +func getProcValueInt(path string) (int64, error) { + data, err := getProcValueBytes(path) if err != nil { return -1, err } diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index da3f3aa46cf3d..23d72949d742a 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -14,23 +14,13 @@ import ( ) func TestGetProcValueInt(t *testing.T) { - k := Kernel{ - statFile: "testdata/stat_file_full", - entropyStatFile: "testdata/entropy_stat_file_full", - } - - d, err := k.getProcValueInt(k.entropyStatFile) + d, err := getProcValueInt("testdata/entropy_stat_file_full") require.NoError(t, err) require.IsType(t, int64(1), d) } func TestGetProcValueByte(t *testing.T) { - k := Kernel{ 
- statFile: "testdata/stat_file_full", - entropyStatFile: "testdata/entropy_stat_file_full", - } - - d, err := k.getProcValueBytes(k.entropyStatFile) + d, err := getProcValueBytes("testdata/entropy_stat_file_full") require.NoError(t, err) require.IsType(t, []byte("test"), d) } diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 622030728c74b..702b288ea01a1 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -101,7 +101,7 @@ func (*Kibana) SampleConfig() string { return sampleConfig } -func (k *Kibana) Start(_ telegraf.Accumulator) error { +func (*Kibana) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/kube_inventory/certificate.go b/plugins/inputs/kube_inventory/certificate.go index 5cf3603288c99..ba71f013b2f83 100644 --- a/plugins/inputs/kube_inventory/certificate.go +++ b/plugins/inputs/kube_inventory/certificate.go @@ -19,7 +19,7 @@ func collectSecrets(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete return } for _, i := range list.Items { - ki.gatherCertificates(i, acc) + gatherCertificates(i, acc) } } @@ -59,7 +59,7 @@ func getTags(cert *x509.Certificate) map[string]string { return tags } -func (ki *KubernetesInventory) gatherCertificates(r corev1.Secret, acc telegraf.Accumulator) { +func gatherCertificates(r corev1.Secret, acc telegraf.Accumulator) { now := time.Now() for resourceName, val := range r.Data { diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go index 1eb86eea13b76..742512f6824fe 100644 --- a/plugins/inputs/kube_inventory/endpoint.go +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -15,11 +15,11 @@ func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *Kuberne return } for _, i := range list.Items { - ki.gatherEndpoint(i, acc) + gatherEndpoint(i, acc) } } -func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) { +func gatherEndpoint(e 
corev1.Endpoints, acc telegraf.Accumulator) { creationTs := e.GetCreationTimestamp() if creationTs.IsZero() { return diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go index f5be722c925bc..c5a8a7509ed31 100644 --- a/plugins/inputs/kube_inventory/endpoint_test.go +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -13,8 +13,6 @@ import ( ) func TestEndpoint(t *testing.T) { - cli := &client{} - now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) @@ -256,12 +254,9 @@ func TestEndpoint(t *testing.T) { } for _, v := range tests { - ks := &KubernetesInventory{ - client: cli, - } acc := new(testutil.Accumulator) for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items { - ks.gatherEndpoint(endpoint, acc) + gatherEndpoint(endpoint, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go index f8a966bc15a46..41890e44c0479 100644 --- a/plugins/inputs/kube_inventory/ingress.go +++ b/plugins/inputs/kube_inventory/ingress.go @@ -15,11 +15,11 @@ func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete return } for _, i := range list.Items { - ki.gatherIngress(i, acc) + gatherIngress(i, acc) } } -func (ki *KubernetesInventory) gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) { +func gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) { creationTs := i.GetCreationTimestamp() if creationTs.IsZero() { return diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go index a391b3808c29b..0ba519b69bf11 100644 --- a/plugins/inputs/kube_inventory/ingress_test.go +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -13,8 +13,6 @@ import ( ) func TestIngress(t *testing.T) { - cli := &client{} - now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 
0, now.Location()) @@ -219,12 +217,9 @@ func TestIngress(t *testing.T) { } for _, v := range tests { - ks := &KubernetesInventory{ - client: cli, - } acc := new(testutil.Accumulator) for _, ingress := range ((v.handler.responseMap["/ingress/"]).(netv1.IngressList)).Items { - ks.gatherIngress(ingress, acc) + gatherIngress(ingress, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index 8aa4e979a65c3..3660c00c7ad90 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -15,14 +15,14 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI return } - ki.gatherNodeCount(len(list.Items), acc) + gatherNodeCount(len(list.Items), acc) for i := range list.Items { ki.gatherNode(&list.Items[i], acc) } } -func (ki *KubernetesInventory) gatherNodeCount(count int, acc telegraf.Accumulator) { +func gatherNodeCount(count int, acc telegraf.Accumulator) { fields := map[string]interface{}{"node_count": count} tags := make(map[string]string) diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go index 5527bca1d020e..00d9093887f7a 100644 --- a/plugins/inputs/kube_inventory/node_test.go +++ b/plugins/inputs/kube_inventory/node_test.go @@ -173,7 +173,7 @@ func TestNode(t *testing.T) { if v.name == "no nodes" { nodeCount := len((v.handler.responseMap["/nodes/"]).(corev1.NodeList).Items) - ks.gatherNodeCount(nodeCount, acc) + gatherNodeCount(nodeCount, acc) } require.Len(t, acc.Metrics, len(v.output)) testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) diff --git a/plugins/inputs/kube_inventory/persistentvolume.go b/plugins/inputs/kube_inventory/persistentvolume.go index 808db450dbcb1..6fb65e9c46874 100644 --- a/plugins/inputs/kube_inventory/persistentvolume.go +++ b/plugins/inputs/kube_inventory/persistentvolume.go @@ -16,11 +16,11 @@ func 
collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki return } for i := range list.Items { - ki.gatherPersistentVolume(&list.Items[i], acc) + gatherPersistentVolume(&list.Items[i], acc) } } -func (ki *KubernetesInventory) gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) { +func gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) { phaseType := 5 switch strings.ToLower(string(pv.Status.Phase)) { case "bound": diff --git a/plugins/inputs/kube_inventory/persistentvolume_test.go b/plugins/inputs/kube_inventory/persistentvolume_test.go index 2e3c15b4824a7..1a93f9b2b7a61 100644 --- a/plugins/inputs/kube_inventory/persistentvolume_test.go +++ b/plugins/inputs/kube_inventory/persistentvolume_test.go @@ -13,7 +13,6 @@ import ( ) func TestPersistentVolume(t *testing.T) { - cli := &client{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) @@ -77,13 +76,10 @@ func TestPersistentVolume(t *testing.T) { } for _, v := range tests { - ks := &KubernetesInventory{ - client: cli, - } acc := new(testutil.Accumulator) items := ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items for i := range items { - ks.gatherPersistentVolume(&items[i], acc) + gatherPersistentVolume(&items[i], acc) } err := acc.FirstError() From f00ab7e54d970465f17f417a3c797d375bc16e02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:04:44 -0600 Subject: [PATCH 160/170] chore(deps): Bump super-linter/super-linter from 7.2.0 to 7.2.1 (#16313) --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 9fd8d494f046a..c16cbfeb88094 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -54,7 +54,7 @@ jobs: # Run Linter against code base # 
################################ - name: Lint Code Base - uses: super-linter/super-linter@v7.2.0 + uses: super-linter/super-linter@v7.2.1 env: VALIDATE_ALL_CODEBASE: false DEFAULT_BRANCH: master From d26158c2d39a414240624f9955c96f88112a00a8 Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Tue, 17 Dec 2024 12:05:09 -0500 Subject: [PATCH 161/170] docs(parsers.openmetrics): Update link to specification (#16312) Signed-off-by: David Ashpole --- plugins/parsers/openmetrics/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/openmetrics/README.md b/plugins/parsers/openmetrics/README.md index a9328a8d96072..e582941fc31f7 100644 --- a/plugins/parsers/openmetrics/README.md +++ b/plugins/parsers/openmetrics/README.md @@ -8,7 +8,7 @@ but can also be used by e.g. The plugin allows to output different metric formats as described in the [Metric Formats section](#metric-formats). -[OpenMetrics Text Format]: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md +[OpenMetrics Text Format]: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md ## Configuration From 0f53eb502b0512be098e2f8c3e3bc6c054a5dd93 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:05:28 -0600 Subject: [PATCH 162/170] chore(deps): Bump github.com/fatih/color from 1.17.0 to 1.18.0 (#16317) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 37bb5d66d04af..e55c9d79f5410 100644 --- a/go.mod +++ b/go.mod @@ -81,7 +81,7 @@ require ( github.com/eclipse/paho.golang v0.21.0 github.com/eclipse/paho.mqtt.golang v1.5.0 github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc - github.com/fatih/color v1.17.0 + github.com/fatih/color v1.18.0 github.com/go-ldap/ldap/v3 v3.4.8 github.com/go-logfmt/logfmt v0.6.0 github.com/go-ole/go-ole v1.3.0 diff --git a/go.sum b/go.sum index 
4361d7679c2f9..5cc624d83442d 100644 --- a/go.sum +++ b/go.sum @@ -1198,8 +1198,8 @@ github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y= From af1685d237f3e7dc2a65de522f5957cf0d140c01 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:05:53 -0600 Subject: [PATCH 163/170] chore(deps): Bump github.com/IBM/nzgo/v12 from 12.0.9-0.20231115043259-49c27f2dfe48 to 12.0.9 (#16319) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e55c9d79f5410..70cf59d476572 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/BurntSushi/toml v1.4.0 github.com/ClickHouse/clickhouse-go v1.5.4 github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 + github.com/IBM/nzgo/v12 v12.0.9 github.com/IBM/sarama v1.43.3 github.com/Masterminds/semver/v3 v3.3.0 github.com/Masterminds/sprig v2.22.0+incompatible diff --git a/go.sum b/go.sum index 5cc624d83442d..30d42dde4bce9 100644 --- a/go.sum +++ b/go.sum @@ -735,8 +735,8 @@ 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 h1:TBb4IxmBH0ssmWTUg0C6c9ZnfDmZospTF8f+YbHnbbA= -github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48/go.mod h1:4pvfEkfsrAdqlljsp8HNwv/uzNKy2fzoXBB1aRIssJg= +github.com/IBM/nzgo/v12 v12.0.9 h1:SwzYFU5ooXsTZsQhU6OsbUhs/fQyLvCtlJYSEZ58mN0= +github.com/IBM/nzgo/v12 v12.0.9/go.mod h1:4pvfEkfsrAdqlljsp8HNwv/uzNKy2fzoXBB1aRIssJg= github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= From 8d469cbb049609d607cddccc5bec9e87819627b2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:08:00 -0600 Subject: [PATCH 164/170] chore(deps): Bump github.com/prometheus/common from 0.60.0 to 0.61.0 (#16318) --- go.mod | 10 +++++----- go.sum | 17 ++++++++++------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 70cf59d476572..c67bdf8cfb7c5 100644 --- a/go.mod +++ b/go.mod @@ -165,7 +165,7 @@ require ( github.com/prometheus-community/pro-bing v0.4.1 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.60.0 + github.com/prometheus/common v0.61.0 github.com/prometheus/procfs v0.15.1 github.com/prometheus/prometheus v0.54.1 github.com/rabbitmq/amqp091-go v1.10.0 @@ -187,7 +187,7 @@ require ( 
github.com/snowflakedb/gosnowflake v1.11.2 github.com/srebhan/cborquery v1.0.1 github.com/srebhan/protobufquery v1.0.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/testcontainers/testcontainers-go v0.34.0 github.com/testcontainers/testcontainers-go/modules/kafka v0.34.0 @@ -214,8 +214,8 @@ require ( go.step.sm/crypto v0.54.0 golang.org/x/crypto v0.31.0 golang.org/x/mod v0.21.0 - golang.org/x/net v0.31.0 - golang.org/x/oauth2 v0.23.0 + golang.org/x/net v0.32.0 + golang.org/x/oauth2 v0.24.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.28.0 golang.org/x/term v0.27.0 @@ -225,7 +225,7 @@ require ( google.golang.org/api v0.203.0 google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 google.golang.org/grpc v1.68.0 - google.golang.org/protobuf v1.35.1 + google.golang.org/protobuf v1.35.2 gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/olivere/elastic.v5 v5.0.86 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 diff --git a/go.sum b/go.sum index 30d42dde4bce9..d68cf935ee6b3 100644 --- a/go.sum +++ b/go.sum @@ -2144,8 +2144,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= -github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -2321,8 +2321,9 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA= github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA= @@ -2718,6 +2719,8 @@ golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2747,8 +2750,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3334,8 +3337,8 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 5dd7c5a0041134b68c014f55c3a325bbdc4b1ab3 Mon Sep 17 00:00:00 2001 From: Dmitry Khamitov Date: Tue, 17 Dec 2024 17:09:13 +0000 Subject: [PATCH 165/170] fix(inputs.mongodb): Do not dereference nil pointer if gathering database stats fails (#16310) --- plugins/inputs/mongodb/mongodb_server.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index b0ea0bb35ad29..c8369c68bb89b 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -327,7 +327,8 @@ func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gathe for _, name := range names { db, err := s.gatherDBStats(name) if err != nil { - s.log.Debugf("Error getting db stats from %q: %s", name, err.Error()) + s.log.Errorf("Error getting db stats from %q: %v", name, err) + continue } dbStats.Dbs = append(dbStats.Dbs, *db) } From 9d5133180393ac3f04fc68f709a9780532a1ee32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 17 Dec 2024 18:10:18 +0100 Subject: [PATCH 166/170] chore: Fix linter findings for `revive:exported` in `plugins/inputs/p*` (#16307) --- plugins/inputs/p4runtime/p4runtime_test.go | 18 +- plugins/inputs/passenger/passenger.go | 72 +++--- plugins/inputs/passenger/passenger_test.go | 8 +- plugins/inputs/pf/pf.go | 158 ++++++------ plugins/inputs/pgbouncer/pgbouncer.go | 18 +- plugins/inputs/phpfpm/child.go | 41 +--- plugins/inputs/phpfpm/fcgi_client.go | 2 +- plugins/inputs/phpfpm/fcgi_test.go | 4 +- plugins/inputs/phpfpm/phpfpm.go | 95 ++++--- plugins/inputs/phpfpm/phpfpm_test.go | 26 +- plugins/inputs/ping/ping.go | 144 +++++------ plugins/inputs/ping/ping_windows_test.go | 8 +- plugins/inputs/postfix/postfix.go | 60 ++--- 
plugins/inputs/postfix/postfix_windows.go | 6 +- plugins/inputs/postgresql/postgresql.go | 12 +- .../postgresql_extensible.go | 18 +- plugins/inputs/powerdns/powerdns.go | 9 +- .../powerdns_recursor/powerdns_recursor.go | 4 +- .../inputs/processes/processes_notwindows.go | 10 +- plugins/inputs/procstat/filter.go | 30 +-- plugins/inputs/procstat/native_finder.go | 52 ++-- plugins/inputs/procstat/native_finder_test.go | 20 +- plugins/inputs/procstat/os_linux.go | 28 +-- plugins/inputs/procstat/os_others.go | 16 +- plugins/inputs/procstat/os_windows.go | 20 +- plugins/inputs/procstat/pgrep.go | 30 +-- plugins/inputs/procstat/process.go | 68 ++--- plugins/inputs/procstat/procstat.go | 130 +++++----- plugins/inputs/procstat/procstat_test.go | 100 ++++---- plugins/inputs/procstat/service_finders.go | 15 +- plugins/inputs/prometheus/consul.go | 22 +- plugins/inputs/prometheus/kubernetes.go | 38 +-- plugins/inputs/prometheus/kubernetes_test.go | 48 ++-- plugins/inputs/prometheus/prometheus.go | 232 +++++++++--------- plugins/inputs/prometheus/prometheus_test.go | 2 +- plugins/inputs/proxmox/proxmox.go | 36 +-- plugins/inputs/proxmox/structs.go | 20 +- plugins/inputs/puppetagent/puppetagent.go | 9 +- 38 files changed, 777 insertions(+), 852 deletions(-) diff --git a/plugins/inputs/p4runtime/p4runtime_test.go b/plugins/inputs/p4runtime/p4runtime_test.go index 58dbb8336ceaa..2972963fc0fed 100644 --- a/plugins/inputs/p4runtime/p4runtime_test.go +++ b/plugins/inputs/p4runtime/p4runtime_test.go @@ -43,7 +43,7 @@ func createEntityCounterEntry( } } -func NewTestP4RuntimeClient( +func newTestP4RuntimeClient( p4RuntimeClient *fakeP4RuntimeClient, addr string, t *testing.T, @@ -102,7 +102,7 @@ func TestErrorGetP4Info(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator 
require.Error(t, plugin.Gather(&acc)) @@ -245,7 +245,7 @@ func TestOneCounterRead(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -333,7 +333,7 @@ func TestMultipleEntitiesSingleCounterRead(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -425,7 +425,7 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -457,7 +457,7 @@ func TestNoCountersAvailable(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -484,7 +484,7 @@ func TestFilterCounters(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) plugin.CounterNamesInclude = []string{"oof"} @@ -534,7 +534,7 @@ func TestFailReadCounterEntryFromEntry(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := 
NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) @@ -577,7 +577,7 @@ func TestFailReadAllEntries(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) + plugin := newTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t) var acc testutil.Accumulator require.NoError(t, plugin.Gather(&acc)) diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index 7123cc70b012d..0175a5000ced5 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -19,22 +19,8 @@ import ( //go:embed sample.conf var sampleConfig string -type passenger struct { - Command string -} - -func (p *passenger) parseCommand() (string, []string) { - var arguments []string - if !strings.Contains(p.Command, " ") { - return p.Command, arguments - } - - arguments = strings.Split(p.Command, " ") - if len(arguments) == 1 { - return arguments[0], arguments[1:] - } - - return arguments[0], arguments[1:] +type Passenger struct { + Command string `toml:"command"` } type info struct { @@ -91,6 +77,39 @@ type process struct { ProcessGroupID string `xml:"process_group_id"` } +func (*Passenger) SampleConfig() string { + return sampleConfig +} + +func (p *Passenger) Gather(acc telegraf.Accumulator) error { + if p.Command == "" { + p.Command = "passenger-status -v --show=xml" + } + + cmd, args := p.parseCommand() + out, err := exec.Command(cmd, args...).Output() + + if err != nil { + return err + } + + return importMetric(out, acc) +} + +func (p *Passenger) parseCommand() (string, []string) { + var arguments []string + if !strings.Contains(p.Command, " ") { + return p.Command, arguments + } + + arguments = strings.Split(p.Command, " ") + if len(arguments) == 1 { + return 
arguments[0], arguments[1:] + } + + return arguments[0], arguments[1:] +} + func (p *process) getUptime() int64 { if p.Uptime == "" { return 0 @@ -131,25 +150,6 @@ func (p *process) getUptime() int64 { return uptime } -func (*passenger) SampleConfig() string { - return sampleConfig -} - -func (p *passenger) Gather(acc telegraf.Accumulator) error { - if p.Command == "" { - p.Command = "passenger-status -v --show=xml" - } - - cmd, args := p.parseCommand() - out, err := exec.Command(cmd, args...).Output() - - if err != nil { - return err - } - - return importMetric(out, acc) -} - func importMetric(stat []byte, acc telegraf.Accumulator) error { var p info @@ -231,6 +231,6 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { func init() { inputs.Add("passenger", func() telegraf.Input { - return &passenger{} + return &Passenger{} }) } diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index 6c53578d7e636..49411d04919d5 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -39,7 +39,7 @@ func teardown(tempFilePath string) { } func Test_Invalid_Passenger_Status_Cli(t *testing.T) { - r := &passenger{ + r := &Passenger{ Command: "an-invalid-command passenger-status", } @@ -55,7 +55,7 @@ func Test_Invalid_Xml(t *testing.T) { require.NoError(t, err) defer teardown(tempFilePath) - r := &passenger{ + r := &Passenger{ Command: tempFilePath, } @@ -72,7 +72,7 @@ func Test_Default_Config_Load_Default_Command(t *testing.T) { require.NoError(t, err) defer teardown(tempFilePath) - r := &passenger{} + r := &Passenger{} var acc testutil.Accumulator @@ -87,7 +87,7 @@ func TestPassengerGenerateMetric(t *testing.T) { defer teardown(tempFilePath) // Now we tested again above server, with our authentication data - r := &passenger{ + r := &Passenger{ Command: tempFilePath, } diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index 204c30a5dbc96..20709aaf750d9 
100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -18,26 +18,81 @@ import ( //go:embed sample.conf var sampleConfig string -const measurement = "pf" -const pfctlCommand = "pfctl" +var ( + errParseHeader = fmt.Errorf("cannot find header in %s output", pfctlCommand) + anyTableHeaderRE = regexp.MustCompile("^[A-Z]") + stateTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) + counterTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) + execLookPath = exec.LookPath + execCommand = exec.Command + pfctlOutputStanzas = []*pfctlOutputStanza{ + { + headerRE: regexp.MustCompile("^State Table"), + parseFunc: parseStateTable, + }, + { + headerRE: regexp.MustCompile("^Counters"), + parseFunc: parseCounterTable, + }, + } + stateTable = []*entry{ + {"entries", "current entries", -1}, + {"searches", "searches", -1}, + {"inserts", "inserts", -1}, + {"removals", "removals", -1}, + } + counterTable = []*entry{ + {"match", "match", -1}, + {"bad-offset", "bad-offset", -1}, + {"fragment", "fragment", -1}, + {"short", "short", -1}, + {"normalize", "normalize", -1}, + {"memory", "memory", -1}, + {"bad-timestamp", "bad-timestamp", -1}, + {"congestion", "congestion", -1}, + {"ip-option", "ip-option", -1}, + {"proto-cksum", "proto-cksum", -1}, + {"state-mismatch", "state-mismatch", -1}, + {"state-insert", "state-insert", -1}, + {"state-limit", "state-limit", -1}, + {"src-limit", "src-limit", -1}, + {"synproxy", "synproxy", -1}, + } +) + +const ( + measurement = "pf" + pfctlCommand = "pfctl" +) type PF struct { - PfctlCommand string - PfctlArgs []string - UseSudo bool - StateTable []*Entry + UseSudo bool `toml:"use_sudo"` + + pfctlCommand string + pfctlArgs []string infoFunc func() (string, error) } +type pfctlOutputStanza struct { + headerRE *regexp.Regexp + parseFunc func([]string, map[string]interface{}) error + found bool +} + +type entry struct { + field string + pfctlTitle string + value int64 +} + func (*PF) SampleConfig() string { return sampleConfig } -// Gather is the 
entrypoint for the plugin. func (pf *PF) Gather(acc telegraf.Accumulator) error { - if pf.PfctlCommand == "" { + if pf.pfctlCommand == "" { var err error - if pf.PfctlCommand, pf.PfctlArgs, err = pf.buildPfctlCmd(); err != nil { + if pf.pfctlCommand, pf.pfctlArgs, err = pf.buildPfctlCmd(); err != nil { acc.AddError(fmt.Errorf("can't construct pfctl commandline: %w", err)) return nil } @@ -55,38 +110,17 @@ func (pf *PF) Gather(acc telegraf.Accumulator) error { return nil } -var errParseHeader = fmt.Errorf("cannot find header in %s output", pfctlCommand) - func errMissingData(tag string) error { return fmt.Errorf("struct data for tag %q not found in %s output", tag, pfctlCommand) } -type pfctlOutputStanza struct { - HeaderRE *regexp.Regexp - ParseFunc func([]string, map[string]interface{}) error - Found bool -} - -var pfctlOutputStanzas = []*pfctlOutputStanza{ - { - HeaderRE: regexp.MustCompile("^State Table"), - ParseFunc: parseStateTable, - }, - { - HeaderRE: regexp.MustCompile("^Counters"), - ParseFunc: parseCounterTable, - }, -} - -var anyTableHeaderRE = regexp.MustCompile("^[A-Z]") - func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error { fields := make(map[string]interface{}) scanner := bufio.NewScanner(strings.NewReader(pfoutput)) for scanner.Scan() { line := scanner.Text() for _, s := range pfctlOutputStanzas { - if s.HeaderRE.MatchString(line) { + if s.headerRE.MatchString(line) { var stanzaLines []string scanner.Scan() line = scanner.Text() @@ -98,15 +132,15 @@ func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error } line = scanner.Text() } - if perr := s.ParseFunc(stanzaLines, fields); perr != nil { + if perr := s.parseFunc(stanzaLines, fields); perr != nil { return perr } - s.Found = true + s.found = true } } } for _, s := range pfctlOutputStanzas { - if !s.Found { + if !s.found { return errParseHeader } } @@ -115,57 +149,22 @@ func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) 
error return nil } -type Entry struct { - Field string - PfctlTitle string - Value int64 -} - -var StateTable = []*Entry{ - {"entries", "current entries", -1}, - {"searches", "searches", -1}, - {"inserts", "inserts", -1}, - {"removals", "removals", -1}, -} - -var stateTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) - func parseStateTable(lines []string, fields map[string]interface{}) error { - return storeFieldValues(lines, stateTableRE, fields, StateTable) + return storeFieldValues(lines, stateTableRE, fields, stateTable) } -var CounterTable = []*Entry{ - {"match", "match", -1}, - {"bad-offset", "bad-offset", -1}, - {"fragment", "fragment", -1}, - {"short", "short", -1}, - {"normalize", "normalize", -1}, - {"memory", "memory", -1}, - {"bad-timestamp", "bad-timestamp", -1}, - {"congestion", "congestion", -1}, - {"ip-option", "ip-option", -1}, - {"proto-cksum", "proto-cksum", -1}, - {"state-mismatch", "state-mismatch", -1}, - {"state-insert", "state-insert", -1}, - {"state-limit", "state-limit", -1}, - {"src-limit", "src-limit", -1}, - {"synproxy", "synproxy", -1}, -} - -var counterTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) - func parseCounterTable(lines []string, fields map[string]interface{}) error { - return storeFieldValues(lines, counterTableRE, fields, CounterTable) + return storeFieldValues(lines, counterTableRE, fields, counterTable) } -func storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]interface{}, entryTable []*Entry) error { +func storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]interface{}, entryTable []*entry) error { for _, v := range lines { entries := regex.FindStringSubmatch(v) if entries != nil { for _, f := range entryTable { - if f.PfctlTitle == entries[1] { + if f.pfctlTitle == entries[1] { var err error - if f.Value, err = strconv.ParseInt(entries[2], 10, 64); err != nil { + if f.value, err = strconv.ParseInt(entries[2], 10, 64); err != nil { return err } } @@ -174,17 +173,17 @@ func 
storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]in } for _, v := range entryTable { - if v.Value == -1 { - return errMissingData(v.PfctlTitle) + if v.value == -1 { + return errMissingData(v.pfctlTitle) } - fields[v.Field] = v.Value + fields[v.field] = v.value } return nil } func (pf *PF) callPfctl() (string, error) { - cmd := execCommand(pf.PfctlCommand, pf.PfctlArgs...) + cmd := execCommand(pf.pfctlCommand, pf.pfctlArgs...) out, oerr := cmd.Output() if oerr != nil { var ee *exec.ExitError @@ -196,9 +195,6 @@ func (pf *PF) callPfctl() (string, error) { return string(out), oerr } -var execLookPath = exec.LookPath -var execCommand = exec.Command - func (pf *PF) buildPfctlCmd() (string, []string, error) { cmd, err := execLookPath(pfctlCommand) if err != nil { diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 4d079e1731f0a..2c6ccf43bc4bd 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -16,6 +16,11 @@ import ( //go:embed sample.conf var sampleConfig string +var ignoredColumns = map[string]bool{"user": true, "database": true, "pool_mode": true, + "avg_req": true, "avg_recv": true, "avg_sent": true, "avg_query": true, + "force_user": true, "host": true, "port": true, "name": true, +} + type PgBouncer struct { ShowCommands []string `toml:"show_commands"` postgresql.Config @@ -23,11 +28,6 @@ type PgBouncer struct { service *postgresql.Service } -var ignoredColumns = map[string]bool{"user": true, "database": true, "pool_mode": true, - "avg_req": true, "avg_recv": true, "avg_sent": true, "avg_query": true, - "force_user": true, "host": true, "port": true, "name": true, -} - func (*PgBouncer) SampleConfig() string { return sampleConfig } @@ -58,10 +58,6 @@ func (p *PgBouncer) Start(_ telegraf.Accumulator) error { return p.service.Start() } -func (p *PgBouncer) Stop() { - p.service.Stop() -} - func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { for 
_, cmd := range p.ShowCommands { switch cmd { @@ -87,6 +83,10 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { return nil } +func (p *PgBouncer) Stop() { + p.service.Stop() +} + func (p *PgBouncer) accRow(row *sql.Rows, columns []string) (map[string]string, map[string]*interface{}, error) { var dbname bytes.Buffer diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 3448db40be4a9..f921dc4bf13d2 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -10,10 +10,8 @@ import ( "errors" "fmt" "io" - "net" "net/http" "net/http/cgi" - "os" "strings" "sync" "time" @@ -164,13 +162,13 @@ var errCloseConn = errors.New("fcgi: connection should be closed") var emptyBody = io.NopCloser(strings.NewReader("")) -// ErrRequestAborted is returned by Read when a handler attempts to read the +// errRequestAborted is returned by Read when a handler attempts to read the // body of a request that has been aborted by the web server. -var ErrRequestAborted = errors.New("fcgi: request aborted by web server") +var errRequestAborted = errors.New("fcgi: request aborted by web server") -// ErrConnClosed is returned by Read when a handler attempts to read the body of +// errConnClosed is returned by Read when a handler attempts to read the body of // a request after the connection to the web server has been closed. 
-var ErrConnClosed = errors.New("fcgi: connection to web server closed") +var errConnClosed = errors.New("fcgi: connection to web server closed") func (c *child) handleRecord(rec *record) error { c.mu.Lock() @@ -249,7 +247,7 @@ func (c *child) handleRecord(rec *record) error { return err } if req.pw != nil { - req.pw.CloseWithError(ErrRequestAborted) + req.pw.CloseWithError(errRequestAborted) } if !req.keepConn { // connection will close upon return @@ -306,34 +304,7 @@ func (c *child) cleanUp() { if req.pw != nil { // race with call to Close in c.serveRequest doesn't matter because // Pipe(Reader|Writer).Close are idempotent - req.pw.CloseWithError(ErrConnClosed) + req.pw.CloseWithError(errConnClosed) } } } - -// Serve accepts incoming FastCGI connections on the listener l, creating a new -// goroutine for each. The goroutine reads requests and then calls handler -// to reply to them. -// If l is nil, Serve accepts connections from os.Stdin. -// If handler is nil, http.DefaultServeMux is used. 
-func Serve(l net.Listener, handler http.Handler) error { - if l == nil { - var err error - l, err = net.FileListener(os.Stdin) - if err != nil { - return err - } - defer l.Close() - } - if handler == nil { - handler = http.DefaultServeMux - } - for { - rw, err := l.Accept() - if err != nil { - return err - } - c := newChild(rw, handler) - go c.serve() - } -} diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index f33b68d0af9a5..e982471b3d0e6 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -44,7 +44,7 @@ func newFcgiClient(timeout time.Duration, h string, args ...interface{}) (*conn, return &conn{rwc: con}, nil } -func (c *conn) Request(env map[string]string, requestData string) (retout, reterr []byte, err error) { +func (c *conn) request(env map[string]string, requestData string) (retout, reterr []byte, err error) { defer c.rwc.Close() var reqID uint16 = 1 diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index f96c22b6fec90..d039685bb05f8 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -206,7 +206,7 @@ var cleanUpTests = []struct { makeRecord(typeAbortRequest, nil), }, nil), - ErrRequestAborted, + errRequestAborted, }, // confirm that child.serve closes all pipes after error reading record { @@ -215,7 +215,7 @@ var cleanUpTests = []struct { nil, }, nil), - ErrConnClosed, + errConnClosed, }, } diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index e1b3ce515fd30..9b3c5dc2704c4 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -26,22 +26,31 @@ import ( var sampleConfig string const ( - PfPool = "pool" - PfProcessManager = "process manager" - PfStartSince = "start since" - PfAcceptedConn = "accepted conn" - PfListenQueue = "listen queue" - PfMaxListenQueue = "max listen queue" - PfListenQueueLen = "listen queue len" - PfIdleProcesses = 
"idle processes" - PfActiveProcesses = "active processes" - PfTotalProcesses = "total processes" - PfMaxActiveProcesses = "max active processes" - PfMaxChildrenReached = "max children reached" - PfSlowRequests = "slow requests" + pfPool = "pool" + pfStartSince = "start since" + pfAcceptedConn = "accepted conn" + pfListenQueue = "listen queue" + pfMaxListenQueue = "max listen queue" + pfListenQueueLen = "listen queue len" + pfIdleProcesses = "idle processes" + pfActiveProcesses = "active processes" + pfTotalProcesses = "total processes" + pfMaxActiveProcesses = "max active processes" + pfMaxChildrenReached = "max children reached" + pfSlowRequests = "slow requests" ) -type JSONMetrics struct { +type Phpfpm struct { + Format string `toml:"format"` + Timeout config.Duration `toml:"timeout"` + Urls []string `toml:"urls"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig + + client *http.Client +} + +type jsonMetrics struct { Pool string `json:"pool"` ProcessManager string `json:"process manager"` StartTime int `json:"start time"` @@ -76,21 +85,11 @@ type JSONMetrics struct { type metricStat map[string]int64 type poolStat map[string]metricStat -type phpfpm struct { - Format string `toml:"format"` - Timeout config.Duration `toml:"timeout"` - Urls []string `toml:"urls"` - Log telegraf.Logger `toml:"-"` - tls.ClientConfig - - client *http.Client -} - -func (*phpfpm) SampleConfig() string { +func (*Phpfpm) SampleConfig() string { return sampleConfig } -func (p *phpfpm) Init() error { +func (p *Phpfpm) Init() error { if len(p.Urls) == 0 { p.Urls = []string{"http://127.0.0.1/status"} } @@ -118,9 +117,7 @@ func (p *phpfpm) Init() error { return nil } -// Reads stats from all configured servers accumulates stats. -// Returns one of the errors encountered while gather stats (if any). 
-func (p *phpfpm) Gather(acc telegraf.Accumulator) error { +func (p *Phpfpm) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, serv := range expandUrls(acc, p.Urls) { wg.Add(1) @@ -136,7 +133,7 @@ func (p *phpfpm) Gather(acc telegraf.Accumulator) error { } // Request status page to get stat raw data and import it -func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { +func (p *Phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { return p.gatherHTTP(addr, acc) } @@ -187,8 +184,8 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { } // Gather stat using fcgi protocol -func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator, addr string) error { - fpmOutput, fpmErr, err := fcgi.Request(map[string]string{ +func (p *Phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator, addr string) error { + fpmOutput, fpmErr, err := fcgi.request(map[string]string{ "SCRIPT_NAME": "/" + statusPath, "SCRIPT_FILENAME": statusPath, "REQUEST_METHOD": "GET", @@ -206,7 +203,7 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula } // Gather stat using http protocol -func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { +func (p *Phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { u, err := url.Parse(addr) if err != nil { return fmt.Errorf("unable parse server address %q: %w", addr, err) @@ -232,7 +229,7 @@ func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { } // Import stat data into Telegraf system -func (p *phpfpm) importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { +func (p *Phpfpm) importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { if p.Format == "json" { p.parseJSON(r, acc, addr) } else { @@ -254,7 +251,7 @@ func parseLines(r io.Reader, acc 
telegraf.Accumulator, addr string) { } fieldName := strings.Trim(keyvalue[0], " ") // We start to gather data for a new pool here - if fieldName == PfPool { + if fieldName == pfPool { currentPool = strings.Trim(keyvalue[1], " ") stats[currentPool] = make(metricStat) continue @@ -262,17 +259,17 @@ func parseLines(r io.Reader, acc telegraf.Accumulator, addr string) { // Start to parse metric for current pool switch fieldName { - case PfStartSince, - PfAcceptedConn, - PfListenQueue, - PfMaxListenQueue, - PfListenQueueLen, - PfIdleProcesses, - PfActiveProcesses, - PfTotalProcesses, - PfMaxActiveProcesses, - PfMaxChildrenReached, - PfSlowRequests: + case pfStartSince, + pfAcceptedConn, + pfListenQueue, + pfMaxListenQueue, + pfListenQueueLen, + pfIdleProcesses, + pfActiveProcesses, + pfTotalProcesses, + pfMaxActiveProcesses, + pfMaxChildrenReached, + pfSlowRequests: fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64) if err == nil { stats[currentPool][fieldName] = fieldValue @@ -294,8 +291,8 @@ func parseLines(r io.Reader, acc telegraf.Accumulator, addr string) { } } -func (p *phpfpm) parseJSON(r io.Reader, acc telegraf.Accumulator, addr string) { - var metrics JSONMetrics +func (p *Phpfpm) parseJSON(r io.Reader, acc telegraf.Accumulator, addr string) { + var metrics jsonMetrics if err := json.NewDecoder(r).Decode(&metrics); err != nil { p.Log.Errorf("Unable to decode JSON response: %s", err) return @@ -402,6 +399,6 @@ func isNetworkURL(addr string) bool { func init() { inputs.Add("phpfpm", func() telegraf.Input { - return &phpfpm{} + return &Phpfpm{} }) } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 92b3affa7ad08..802c761532ccc 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -56,7 +56,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { defer ts.Close() url := ts.URL + "?test=ok" - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{url}, Log: 
&testutil.Logger{}, } @@ -106,7 +106,7 @@ func TestPhpFpmGeneratesJSONMetrics_From_Http(t *testing.T) { expected, err := testutil.ParseMetricsFromFile("testdata/expected.out", parser) require.NoError(t, err) - input := &phpfpm{ + input := &Phpfpm{ Urls: []string{server.URL + "?full&json"}, Format: "json", Log: &testutil.Logger{}, @@ -128,7 +128,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway // Now we tested again above server - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, Log: &testutil.Logger{}, } @@ -179,7 +179,7 @@ func TestPhpFpmTimeout_From_Fcgi(t *testing.T) { }() // Now we tested again above server - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, Timeout: config.Duration(timeout), Log: &testutil.Logger{}, @@ -211,7 +211,7 @@ func TestPhpFpmCrashWithTimeout_From_Fcgi(t *testing.T) { const timeout = 200 * time.Millisecond // Now we tested again above server - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"fcgi://" + tcpAddress + "/status"}, Timeout: config.Duration(timeout), Log: &testutil.Logger{}, @@ -237,7 +237,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { s := statServer{} go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{tcp.Addr().String()}, Log: &testutil.Logger{}, } @@ -289,7 +289,7 @@ func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) { go fcgi.Serve(tcp1, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway go fcgi.Serve(tcp2, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"}, Log: &testutil.Logger{}, } @@ -340,7 +340,7 @@ func 
TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { s := statServer{} go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{tcp.Addr().String() + ":custom-status-path"}, Log: &testutil.Logger{}, } @@ -374,7 +374,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { // When not passing server config, we default to localhost // We just want to make sure we did request stat from localhost func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"http://bad.localhost:62001/status"}, Log: &testutil.Logger{}, } @@ -389,7 +389,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t t.Skip("Skipping long test in short mode") } - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"http://aninvalidone"}, Log: &testutil.Logger{}, } @@ -402,7 +402,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"/tmp/invalid.sock"}, Log: &testutil.Logger{}, } @@ -435,7 +435,7 @@ var outputSampleJSON []byte func TestPhpFpmParseJSON_Log_Error_Without_Panic_When_When_JSON_Is_Invalid(t *testing.T) { // Capture the logging output for checking logger := &testutil.CaptureLogger{Name: "inputs.phpfpm"} - plugin := &phpfpm{Log: logger} + plugin := &Phpfpm{Log: logger} require.NoError(t, plugin.Init()) // parse valid JSON without panic and without log output @@ -459,7 +459,7 @@ func TestGatherDespiteUnavailable(t *testing.T) { go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway // Now we tested again above server - r := &phpfpm{ + r := &Phpfpm{ Urls: []string{"fcgi://" + tcp.Addr().String() + "/status", "/lala"}, Log: &testutil.Logger{}, } diff --git 
a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 9f2e692f1cf70..8538d394bc809 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -28,71 +28,69 @@ const ( defaultPingDataBytesSize = 56 ) -// HostPinger is a function that runs the "ping" function using a list of -// passed arguments. This can be easily switched with a mocked ping function -// for unit test purposes (see ping_test.go) -type HostPinger func(binary string, timeout float64, args ...string) (string, error) - type Ping struct { - // wg is used to wait for ping with multiple URLs - wg sync.WaitGroup - - // Pre-calculated interval and timeout - calcInterval time.Duration - calcTimeout time.Duration - - sourceAddress string - - Log telegraf.Logger `toml:"-"` - - // Interval at which to ping (ping -i ) - PingInterval float64 `toml:"ping_interval"` - - // Number of pings to send (ping -c ) - Count int - - // Per-ping timeout, in seconds. 0 means no timeout (ping -W ) - Timeout float64 - - // Ping deadline, in seconds. 0 means no deadline. (ping -w ) - Deadline int - - // Interface or source address to send ping from (ping -I/-S ) - Interface string - - // URLs to ping - Urls []string - - // Method defines how to ping (native or exec) - Method string + Urls []string `toml:"urls"` // URLs to ping + Method string `toml:"method"` // Method defines how to ping (native or exec) + Count int `toml:"count"` // Number of pings to send (ping -c ) + PingInterval float64 `toml:"ping_interval"` // Interval at which to ping (ping -i ) + Timeout float64 `toml:"timeout"` // Per-ping timeout, in seconds. 0 means no timeout (ping -W ) + Deadline int `toml:"deadline"` // Ping deadline, in seconds. 0 means no deadline. 
(ping -w ) + Interface string `toml:"interface"` // Interface or source address to send ping from (ping -I/-S ) + Percentiles []int `toml:"percentiles"` // Calculate the given percentiles when using native method + Binary string `toml:"binary"` // Ping executable binary + // Arguments for ping command. When arguments are not empty, system binary will be used and other options (ping_interval, timeout, etc.) will be ignored + Arguments []string `toml:"arguments"` + IPv4 bool `toml:"ipv4"` // Whether to resolve addresses using ipv4 or not. + IPv6 bool `toml:"ipv6"` // Whether to resolve addresses using ipv6 or not. + Size *int `toml:"size"` // Packet size + Log telegraf.Logger `toml:"-"` + + wg sync.WaitGroup // wg is used to wait for ping with multiple URLs + calcInterval time.Duration // Pre-calculated interval and timeout + calcTimeout time.Duration + sourceAddress string + pingHost hostPingerFunc // host ping function + nativePingFunc nativePingFunc +} - // Ping executable binary - Binary string +// hostPingerFunc is a function that runs the "ping" function using a list of +// passed arguments. This can be easily switched with a mocked ping function +// for unit test purposes (see ping_test.go) +type hostPingerFunc func(binary string, timeout float64, args ...string) (string, error) - // Arguments for ping command. When arguments is not empty, system binary will be used and - // other options (ping_interval, timeout, etc.) will be ignored - Arguments []string +type nativePingFunc func(destination string) (*pingStats, error) - // Whether to resolve addresses using ipv4 or not. - IPv4 bool +type durationSlice []time.Duration - // Whether to resolve addresses using ipv6 or not. 
- IPv6 bool +type pingStats struct { + ping.Statistics + ttl int +} - // host ping function - pingHost HostPinger +func (*Ping) SampleConfig() string { + return sampleConfig +} - nativePingFunc NativePingFunc +func (p *Ping) Init() error { + if p.Count < 1 { + return errors.New("bad number of packets to transmit") + } - // Calculate the given percentiles when using native method - Percentiles []int + // The interval cannot be below 0.2 seconds, matching ping implementation: https://linux.die.net/man/8/ping + if p.PingInterval < 0.2 { + p.calcInterval = time.Duration(.2 * float64(time.Second)) + } else { + p.calcInterval = time.Duration(p.PingInterval * float64(time.Second)) + } - // Packet size - Size *int -} + // If no timeout is given default to 5 seconds, matching original implementation + if p.Timeout == 0 { + p.calcTimeout = time.Duration(5) * time.Second + } else { + p.calcTimeout = time.Duration(p.Timeout) * time.Second + } -func (*Ping) SampleConfig() string { - return sampleConfig + return nil } func (p *Ping) Gather(acc telegraf.Accumulator) error { @@ -115,13 +113,6 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { return nil } -type pingStats struct { - ping.Statistics - ttl int -} - -type NativePingFunc func(destination string) (*pingStats, error) - func (p *Ping) nativePing(destination string) (*pingStats, error) { ps := &pingStats{} @@ -259,11 +250,11 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { acc.AddFields("ping", fields, tags) } -type durationSlice []time.Duration +func (p durationSlice) Len() int { return len(p) } -func (p durationSlice) Len() int { return len(p) } func (p durationSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p durationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p durationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // R7 from Hyndman and Fan (1996), which matches Excel func percentile(values durationSlice, perc int) time.Duration { @@ -292,29 +283,6 @@ 
func percentile(values durationSlice, perc int) time.Duration { return lower + time.Duration(rankFraction*float64(upper-lower)) } -// Init ensures the plugin is configured correctly. -func (p *Ping) Init() error { - if p.Count < 1 { - return errors.New("bad number of packets to transmit") - } - - // The interval cannot be below 0.2 seconds, matching ping implementation: https://linux.die.net/man/8/ping - if p.PingInterval < 0.2 { - p.calcInterval = time.Duration(.2 * float64(time.Second)) - } else { - p.calcInterval = time.Duration(p.PingInterval * float64(time.Second)) - } - - // If no timeout is given default to 5 seconds, matching original implementation - if p.Timeout == 0 { - p.calcTimeout = time.Duration(5) * time.Second - } else { - p.calcTimeout = time.Duration(p.Timeout) * time.Second - } - - return nil -} - func hostPinger(binary string, timeout float64, args ...string) (string, error) { bin, err := exec.LookPath(binary) if err != nil { diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 4517bf8f33736..93b2bd04ff99a 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -261,7 +261,7 @@ func TestFatalPingGather(t *testing.T) { "Fatal ping should not have packet measurements") } -var UnreachablePingOutput = ` +var unreachablePingOutput = ` Pinging www.google.pl [8.8.8.8] with 32 bytes of data: Request timed out. Request timed out. @@ -273,7 +273,7 @@ Ping statistics for 8.8.8.8: ` func mockUnreachableHostPinger(string, float64, ...string) (string, error) { - return UnreachablePingOutput, errors.New("so very bad") + return unreachablePingOutput, errors.New("so very bad") } // Reply from 185.28.251.217: TTL expired in transit. 
@@ -312,7 +312,7 @@ func TestUnreachablePingGather(t *testing.T) { "Fatal ping should not have packet measurements") } -var TTLExpiredPingOutput = ` +var ttlExpiredPingOutput = ` Pinging www.google.pl [8.8.8.8] with 32 bytes of data: Request timed out. Request timed out. @@ -324,7 +324,7 @@ Ping statistics for 8.8.8.8: ` func mockTTLExpiredPinger(string, float64, ...string) (string, error) { - return TTLExpiredPingOutput, errors.New("so very bad") + return ttlExpiredPingOutput, errors.New("so very bad") } // in case 'Destination net unreachable' ping app return receive packet which is not what we need diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index cc5c7024c57e8..f657404d2882e 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -21,6 +21,36 @@ import ( //go:embed sample.conf var sampleConfig string +type Postfix struct { + QueueDirectory string `toml:"queue_directory"` +} + +func (*Postfix) SampleConfig() string { + return sampleConfig +} + +func (p *Postfix) Gather(acc telegraf.Accumulator) error { + if p.QueueDirectory == "" { + var err error + p.QueueDirectory, err = getQueueDirectory() + if err != nil { + return fmt.Errorf("unable to determine queue directory: %w", err) + } + } + + for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} { + fields, err := qScan(filepath.Join(p.QueueDirectory, q), acc) + if err != nil { + acc.AddError(fmt.Errorf("error scanning queue %q: %w", q, err)) + continue + } + + acc.AddFields("postfix_queue", fields, map[string]string{"queue": q}) + } + + return nil +} + func getQueueDirectory() (string, error) { qd, err := exec.Command("postconf", "-h", "queue_directory").Output() if err != nil { @@ -75,36 +105,6 @@ func qScan(path string, acc telegraf.Accumulator) (map[string]interface{}, error return fields, nil } -type Postfix struct { - QueueDirectory string -} - -func (*Postfix) SampleConfig() string { - return sampleConfig -} - 
-func (p *Postfix) Gather(acc telegraf.Accumulator) error { - if p.QueueDirectory == "" { - var err error - p.QueueDirectory, err = getQueueDirectory() - if err != nil { - return fmt.Errorf("unable to determine queue directory: %w", err) - } - } - - for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} { - fields, err := qScan(filepath.Join(p.QueueDirectory, q), acc) - if err != nil { - acc.AddError(fmt.Errorf("error scanning queue %q: %w", q, err)) - continue - } - - acc.AddFields("postfix_queue", fields, map[string]string{"queue": q}) - } - - return nil -} - func init() { inputs.Add("postfix", func() telegraf.Input { return &Postfix{ diff --git a/plugins/inputs/postfix/postfix_windows.go b/plugins/inputs/postfix/postfix_windows.go index 3b027f24a2ade..9831787ff7194 100644 --- a/plugins/inputs/postfix/postfix_windows.go +++ b/plugins/inputs/postfix/postfix_windows.go @@ -16,11 +16,13 @@ type Postfix struct { Log telegraf.Logger `toml:"-"` } +func (*Postfix) SampleConfig() string { return sampleConfig } + func (p *Postfix) Init() error { - p.Log.Warn("current platform is not supported") + p.Log.Warn("Current platform is not supported") return nil } -func (*Postfix) SampleConfig() string { return sampleConfig } + func (*Postfix) Gather(_ telegraf.Accumulator) error { return nil } func init() { diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index dc8f37ca8d6d3..46b2354874cb2 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -16,6 +16,8 @@ import ( //go:embed sample.conf var sampleConfig string +var ignoredColumns = map[string]bool{"stats_reset": true} + type Postgresql struct { Databases []string `toml:"databases"` IgnoredDatabases []string `toml:"ignored_databases"` @@ -25,8 +27,6 @@ type Postgresql struct { service *postgresql.Service } -var ignoredColumns = map[string]bool{"stats_reset": true} - func (*Postgresql) SampleConfig() string { 
return sampleConfig } @@ -47,10 +47,6 @@ func (p *Postgresql) Start(_ telegraf.Accumulator) error { return p.service.Start() } -func (p *Postgresql) Stop() { - p.service.Stop() -} - func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var query string if len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0 { @@ -106,6 +102,10 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { return bgWriterRow.Err() } +func (p *Postgresql) Stop() { + p.service.Stop() +} + func (p *Postgresql) accRow(row *sql.Rows, acc telegraf.Accumulator, columns []string) error { var dbname bytes.Buffer diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index a4d867b8435c6..cb10f266bcedd 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -21,6 +21,8 @@ import ( //go:embed sample.conf var sampleConfig string +var ignoredColumns = map[string]bool{"stats_reset": true} + type Postgresql struct { Databases []string `deprecated:"1.22.4;use the sqlquery option to specify database to use"` Query []query `toml:"query"` @@ -45,7 +47,9 @@ type query struct { additionalTags map[string]bool } -var ignoredColumns = map[string]bool{"stats_reset": true} +type scanner interface { + Scan(dest ...interface{}) error +} func (*Postgresql) SampleConfig() string { return sampleConfig @@ -102,10 +106,6 @@ func (p *Postgresql) Start(_ telegraf.Accumulator) error { return p.service.Start() } -func (p *Postgresql) Stop() { - p.service.Stop() -} - func (p *Postgresql) Gather(acc telegraf.Accumulator) error { // Retrieving the database version query := `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'` @@ -128,6 +128,10 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { return nil } +func (p *Postgresql) Stop() { + p.service.Stop() +} + func (p *Postgresql) 
gatherMetricsFromQuery(acc telegraf.Accumulator, q query, timestamp time.Time) error { rows, err := p.service.DB.Query(q.Sqlquery) if err != nil { @@ -150,10 +154,6 @@ func (p *Postgresql) gatherMetricsFromQuery(acc telegraf.Accumulator, q query, t return nil } -type scanner interface { - Scan(dest ...interface{}) error -} - func (p *Postgresql) accRow(acc telegraf.Accumulator, row scanner, columns []string, q query, timestamp time.Time) error { // this is where we'll store the column name with its *interface{} columnMap := make(map[string]*interface{}) diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 44c765348a646..5ac8397e077fc 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -19,14 +19,13 @@ import ( //go:embed sample.conf var sampleConfig string -type Powerdns struct { - UnixSockets []string +const defaultTimeout = 5 * time.Second - Log telegraf.Logger `toml:"-"` +type Powerdns struct { + UnixSockets []string `toml:"unix_sockets"` + Log telegraf.Logger `toml:"-"` } -var defaultTimeout = 5 * time.Second - func (*Powerdns) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index 48e83179a4746..48a77518f5a6a 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -14,6 +14,8 @@ import ( //go:embed sample.conf var sampleConfig string +const defaultTimeout = 5 * time.Second + type PowerdnsRecursor struct { UnixSockets []string `toml:"unix_sockets"` SocketDir string `toml:"socket_dir"` @@ -26,8 +28,6 @@ type PowerdnsRecursor struct { gatherFromServer func(address string, acc telegraf.Accumulator) error } -var defaultTimeout = 5 * time.Second - func (*PowerdnsRecursor) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/processes/processes_notwindows.go 
b/plugins/inputs/processes/processes_notwindows.go index c574238fd5a23..e476e8ff2454f 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -19,15 +19,13 @@ import ( ) type Processes struct { - UseSudo bool `toml:"use_sudo"` + UseSudo bool `toml:"use_sudo"` + Log telegraf.Logger `toml:"-"` execPS func(UseSudo bool) ([]byte, error) readProcFile func(filename string) ([]byte, error) - - Log telegraf.Logger - - forcePS bool - forceProc bool + forcePS bool + forceProc bool } func (p *Processes) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/procstat/filter.go b/plugins/inputs/procstat/filter.go index 3c090549c0718..d8f621048b77e 100644 --- a/plugins/inputs/procstat/filter.go +++ b/plugins/inputs/procstat/filter.go @@ -7,13 +7,13 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" + telegraf_filter "github.com/influxdata/telegraf/filter" ) -type Filter struct { +type filter struct { Name string `toml:"name"` PidFiles []string `toml:"pid_files"` SystemdUnits []string `toml:"systemd_units"` @@ -29,13 +29,13 @@ type Filter struct { filterSupervisorUnit string filterCmds []*regexp.Regexp - filterUser filter.Filter - filterExecutable filter.Filter - filterProcessName filter.Filter + filterUser telegraf_filter.Filter + filterExecutable telegraf_filter.Filter + filterProcessName telegraf_filter.Filter finder *processFinder } -func (f *Filter) Init() error { +func (f *filter) init() error { if f.Name == "" { return errors.New("filter must be named") } @@ -74,13 +74,13 @@ func (f *Filter) Init() error { f.filterSupervisorUnit = strings.TrimSpace(strings.Join(f.SupervisorUnits, " ")) var err error - if f.filterUser, err = filter.Compile(f.Users); err != nil { + if f.filterUser, err = telegraf_filter.Compile(f.Users); err != nil { return 
fmt.Errorf("compiling users filter for %q failed: %w", f.Name, err) } - if f.filterExecutable, err = filter.Compile(f.Executables); err != nil { + if f.filterExecutable, err = telegraf_filter.Compile(f.Executables); err != nil { return fmt.Errorf("compiling executables filter for %q failed: %w", f.Name, err) } - if f.filterProcessName, err = filter.Compile(f.ProcessNames); err != nil { + if f.filterProcessName, err = telegraf_filter.Compile(f.ProcessNames); err != nil { return fmt.Errorf("compiling process-names filter for %q failed: %w", f.Name, err) } @@ -89,7 +89,7 @@ func (f *Filter) Init() error { return nil } -func (f *Filter) ApplyFilter() ([]processGroup, error) { +func (f *filter) applyFilter() ([]processGroup, error) { // Determine processes on service level. if there is no constraint on the // services, use all processes for matching. var groups []processGroup @@ -125,7 +125,7 @@ func (f *Filter) ApplyFilter() ([]processGroup, error) { } groups = append(groups, g...) default: - procs, err := process.Processes() + procs, err := gopsprocess.Processes() if err != nil { return nil, err } @@ -135,7 +135,7 @@ func (f *Filter) ApplyFilter() ([]processGroup, error) { // Filter by additional properties such as users, patterns etc result := make([]processGroup, 0, len(groups)) for _, g := range groups { - var matched []*process.Process + var matched []*gopsprocess.Process for _, p := range g.processes { // Users if f.filterUser != nil { @@ -218,13 +218,13 @@ func (f *Filter) ApplyFilter() ([]processGroup, error) { return result, nil } -func getChildren(p *process.Process) ([]*process.Process, error) { +func getChildren(p *gopsprocess.Process) ([]*gopsprocess.Process, error) { children, err := p.Children() // Check for cases that do not really mean error but rather means that there // is no match. 
switch { case err == nil, - errors.Is(err, process.ErrorNoChildren), + errors.Is(err, gopsprocess.ErrorNoChildren), strings.Contains(err.Error(), "exit status 1"): return children, nil } diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 5f9812782b094..192a431acd503 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -7,16 +7,16 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" ) // NativeFinder uses gopsutil to find processes type NativeFinder struct{} // Uid will return all pids for the given user -func (pg *NativeFinder) UID(user string) ([]PID, error) { - var dst []PID - procs, err := process.Processes() +func (pg *NativeFinder) uid(user string) ([]pid, error) { + var dst []pid + procs, err := gopsprocess.Processes() if err != nil { return dst, err } @@ -27,35 +27,35 @@ func (pg *NativeFinder) UID(user string) ([]PID, error) { continue } if username == user { - dst = append(dst, PID(p.Pid)) + dst = append(dst, pid(p.Pid)) } } return dst, nil } // PidFile returns the pid from the pid file given. 
-func (pg *NativeFinder) PidFile(path string) ([]PID, error) { - var pids []PID +func (pg *NativeFinder) pidFile(path string) ([]pid, error) { + var pids []pid pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("failed to read pidfile %q: %w", path, err) } - pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) + processID, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) if err != nil { return pids, err } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) return pids, nil } // FullPattern matches on the command line when the process was executed -func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) { - var pids []PID +func (pg *NativeFinder) fullPattern(pattern string) ([]pid, error) { + var pids []pid regxPattern, err := regexp.Compile(pattern) if err != nil { return pids, err } - procs, err := pg.FastProcessList() + procs, err := pg.fastProcessList() if err != nil { return pids, err } @@ -66,18 +66,18 @@ func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) { continue } if regxPattern.MatchString(cmd) { - pids = append(pids, PID(p.Pid)) + pids = append(pids, pid(p.Pid)) } } return pids, err } // Children matches children pids on the command line when the process was executed -func (pg *NativeFinder) Children(pid PID) ([]PID, error) { +func (pg *NativeFinder) children(processID pid) ([]pid, error) { // Get all running processes - p, err := process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(processID)) if err != nil { - return nil, fmt.Errorf("getting process %d failed: %w", pid, err) + return nil, fmt.Errorf("getting process %d failed: %w", processID, err) } // Get all children of the current process @@ -85,35 +85,35 @@ func (pg *NativeFinder) Children(pid PID) ([]PID, error) { if err != nil { return nil, fmt.Errorf("unable to get children of process %d: %w", p.Pid, err) } - pids := make([]PID, 0, len(children)) + pids := 
make([]pid, 0, len(children)) for _, child := range children { - pids = append(pids, PID(child.Pid)) + pids = append(pids, pid(child.Pid)) } return pids, err } -func (pg *NativeFinder) FastProcessList() ([]*process.Process, error) { - pids, err := process.Pids() +func (pg *NativeFinder) fastProcessList() ([]*gopsprocess.Process, error) { + pids, err := gopsprocess.Pids() if err != nil { return nil, err } - result := make([]*process.Process, 0, len(pids)) + result := make([]*gopsprocess.Process, 0, len(pids)) for _, pid := range pids { - result = append(result, &process.Process{Pid: pid}) + result = append(result, &gopsprocess.Process{Pid: pid}) } return result, nil } // Pattern matches on the process name -func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { - var pids []PID +func (pg *NativeFinder) pattern(pattern string) ([]pid, error) { + var pids []pid regxPattern, err := regexp.Compile(pattern) if err != nil { return pids, err } - procs, err := pg.FastProcessList() + procs, err := pg.fastProcessList() if err != nil { return pids, err } @@ -124,7 +124,7 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { continue } if regxPattern.MatchString(name) { - pids = append(pids, PID(p.Pid)) + pids = append(pids, pid(p.Pid)) } } return pids, err diff --git a/plugins/inputs/procstat/native_finder_test.go b/plugins/inputs/procstat/native_finder_test.go index 1e6c6d84ade0c..e4e6e0bb8726d 100644 --- a/plugins/inputs/procstat/native_finder_test.go +++ b/plugins/inputs/procstat/native_finder_test.go @@ -14,7 +14,7 @@ import ( func BenchmarkPattern(b *testing.B) { finder := &NativeFinder{} for n := 0; n < b.N; n++ { - _, err := finder.Pattern(".*") + _, err := finder.pattern(".*") require.NoError(b, err) } } @@ -22,7 +22,7 @@ func BenchmarkPattern(b *testing.B) { func BenchmarkFullPattern(b *testing.B) { finder := &NativeFinder{} for n := 0; n < b.N; n++ { - _, err := finder.FullPattern(".*") + _, err := finder.fullPattern(".*") require.NoError(b, 
err) } } @@ -37,26 +37,26 @@ func TestChildPattern(t *testing.T) { require.NoError(t, err) // Spawn two child processes and get their PIDs - expected := make([]PID, 0, 2) + expected := make([]pid, 0, 2) ctx, cancel := context.WithCancel(context.Background()) defer cancel() // First process cmd1 := exec.CommandContext(ctx, "/bin/sh") require.NoError(t, cmd1.Start(), "starting first command failed") - expected = append(expected, PID(cmd1.Process.Pid)) + expected = append(expected, pid(cmd1.Process.Pid)) // Second process cmd2 := exec.CommandContext(ctx, "/bin/sh") require.NoError(t, cmd2.Start(), "starting first command failed") - expected = append(expected, PID(cmd2.Process.Pid)) + expected = append(expected, pid(cmd2.Process.Pid)) // Use the plugin to find the children finder := &NativeFinder{} - parent, err := finder.Pattern(parentName) + parent, err := finder.pattern(parentName) require.NoError(t, err) require.Len(t, parent, 1) - children, err := finder.Children(parent[0]) + children, err := finder.children(parent[0]) require.NoError(t, err) require.ElementsMatch(t, expected, children) } @@ -66,7 +66,7 @@ func TestGather_RealPatternIntegration(t *testing.T) { t.Skip("Skipping integration test in short mode") } pg := &NativeFinder{} - pids, err := pg.Pattern(`procstat`) + pids, err := pg.pattern(`procstat`) require.NoError(t, err) require.NotEmpty(t, pids) } @@ -79,7 +79,7 @@ func TestGather_RealFullPatternIntegration(t *testing.T) { t.Skip("Skipping integration test on Non-Windows OS") } pg := &NativeFinder{} - pids, err := pg.FullPattern(`%procstat%`) + pids, err := pg.fullPattern(`%procstat%`) require.NoError(t, err) require.NotEmpty(t, pids) } @@ -92,7 +92,7 @@ func TestGather_RealUserIntegration(t *testing.T) { require.NoError(t, err) pg := &NativeFinder{} - pids, err := pg.UID(currentUser.Username) + pids, err := pg.uid(currentUser.Username) require.NoError(t, err) require.NotEmpty(t, pids) } diff --git a/plugins/inputs/procstat/os_linux.go 
b/plugins/inputs/procstat/os_linux.go index 6c9d906faa276..cec134ee33232 100644 --- a/plugins/inputs/procstat/os_linux.go +++ b/plugins/inputs/procstat/os_linux.go @@ -13,15 +13,15 @@ import ( "github.com/coreos/go-systemd/v22/dbus" "github.com/prometheus/procfs" - "github.com/shirou/gopsutil/v4/net" - "github.com/shirou/gopsutil/v4/process" + gopsnet "github.com/shirou/gopsutil/v4/net" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" "github.com/influxdata/telegraf/internal" ) -func processName(p *process.Process) (string, error) { +func processName(p *gopsprocess.Process) (string, error) { return p.Exe() } @@ -29,7 +29,7 @@ func queryPidWithWinServiceName(_ string) (uint32, error) { return 0, errors.New("os not supporting win_service option") } -func collectMemmap(proc Process, prefix string, fields map[string]any) { +func collectMemmap(proc process, prefix string, fields map[string]any) { memMapStats, err := proc.MemoryMaps(true) if err == nil && len(*memMapStats) == 1 { memMap := (*memMapStats)[0] @@ -70,12 +70,12 @@ func findBySystemdUnits(units []string) ([]processGroup, error) { if !ok { return nil, fmt.Errorf("failed to parse PID %v of unit %q: invalid type %T", raw, u, raw) } - p, err := process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil { return nil, fmt.Errorf("failed to find process for PID %d of unit %q: %w", pid, u, err) } groups = append(groups, processGroup{ - processes: []*process.Process{p}, + processes: []*gopsprocess.Process{p}, tags: map[string]string{"systemd_unit": u.Name}, }) } @@ -87,14 +87,14 @@ func findByWindowsServices(_ []string) ([]processGroup, error) { return nil, nil } -func collectTotalReadWrite(proc Process) (r, w uint64, err error) { +func collectTotalReadWrite(proc process) (r, w uint64, err error) { path := internal.GetProcPath() fs, err := procfs.NewFS(path) if err != nil { return 0, 0, err } - p, err := 
fs.Proc(int(proc.PID())) + p, err := fs.Proc(int(proc.pid())) if err != nil { return 0, 0, err } @@ -177,7 +177,7 @@ func mapFdToInode(pid int32, fd uint32) (uint32, error) { return uint32(inode), nil } -func statsTCP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{}, error) { +func statsTCP(conns []gopsnet.ConnectionStat, family uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -185,7 +185,7 @@ func statsTCP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{ // For TCP we need the inode for each connection to relate the connection // statistics to the actual process socket. Therefore, map the // file-descriptors to inodes using the /proc//fd entries. - inodes := make(map[uint32]net.ConnectionStat, len(conns)) + inodes := make(map[uint32]gopsnet.ConnectionStat, len(conns)) for _, c := range conns { inode, err := mapFdToInode(c.Pid, c.Fd) if err != nil { @@ -240,7 +240,7 @@ func statsTCP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{ return fieldslist, nil } -func statsUDP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{}, error) { +func statsUDP(conns []gopsnet.ConnectionStat, family uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -248,7 +248,7 @@ func statsUDP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{ // For UDP we need the inode for each connection to relate the connection // statistics to the actual process socket. Therefore, map the // file-descriptors to inodes using the /proc//fd entries. 
- inodes := make(map[uint32]net.ConnectionStat, len(conns)) + inodes := make(map[uint32]gopsnet.ConnectionStat, len(conns)) for _, c := range conns { inode, err := mapFdToInode(c.Pid, c.Fd) if err != nil { @@ -299,7 +299,7 @@ func statsUDP(conns []net.ConnectionStat, family uint8) ([]map[string]interface{ return fieldslist, nil } -func statsUnix(conns []net.ConnectionStat) ([]map[string]interface{}, error) { +func statsUnix(conns []gopsnet.ConnectionStat) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -307,7 +307,7 @@ func statsUnix(conns []net.ConnectionStat) ([]map[string]interface{}, error) { // We need to read the inode for each connection to relate the connection // statistics to the actual process socket. Therefore, map the // file-descriptors to inodes using the /proc//fd entries. - inodes := make(map[uint32]net.ConnectionStat, len(conns)) + inodes := make(map[uint32]gopsnet.ConnectionStat, len(conns)) for _, c := range conns { inode, err := mapFdToInode(c.Pid, c.Fd) if err != nil { diff --git a/plugins/inputs/procstat/os_others.go b/plugins/inputs/procstat/os_others.go index 62334f885ccda..ba34038072a21 100644 --- a/plugins/inputs/procstat/os_others.go +++ b/plugins/inputs/procstat/os_others.go @@ -6,11 +6,11 @@ import ( "errors" "syscall" - "github.com/shirou/gopsutil/v4/net" - "github.com/shirou/gopsutil/v4/process" + gopsnet "github.com/shirou/gopsutil/v4/net" + gopsprocess "github.com/shirou/gopsutil/v4/process" ) -func processName(p *process.Process) (string, error) { +func processName(p *gopsprocess.Process) (string, error) { return p.Exe() } @@ -18,7 +18,7 @@ func queryPidWithWinServiceName(string) (uint32, error) { return 0, errors.New("os not supporting win_service option") } -func collectMemmap(Process, string, map[string]any) {} +func collectMemmap(process, string, map[string]any) {} func findBySystemdUnits([]string) ([]processGroup, error) { return nil, nil @@ -28,11 +28,11 @@ func findByWindowsServices([]string) 
([]processGroup, error) { return nil, nil } -func collectTotalReadWrite(Process) (r, w uint64, err error) { +func collectTotalReadWrite(process) (r, w uint64, err error) { return 0, 0, errors.ErrUnsupported } -func statsTCP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { +func statsTCP(conns []gopsnet.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -65,7 +65,7 @@ func statsTCP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, er return fieldslist, nil } -func statsUDP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { +func statsUDP(conns []gopsnet.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -98,6 +98,6 @@ func statsUDP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, er return fieldslist, nil } -func statsUnix([]net.ConnectionStat) ([]map[string]interface{}, error) { +func statsUnix([]gopsnet.ConnectionStat) ([]map[string]interface{}, error) { return nil, errors.ErrUnsupported } diff --git a/plugins/inputs/procstat/os_windows.go b/plugins/inputs/procstat/os_windows.go index 05ada5a4748bc..b15e424d405f7 100644 --- a/plugins/inputs/procstat/os_windows.go +++ b/plugins/inputs/procstat/os_windows.go @@ -8,13 +8,13 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v4/net" - "github.com/shirou/gopsutil/v4/process" + gopsnet "github.com/shirou/gopsutil/v4/net" + gopsprocess "github.com/shirou/gopsutil/v4/process" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) -func processName(p *process.Process) (string, error) { +func processName(p *gopsprocess.Process) (string, error) { return p.Name() } @@ -57,7 +57,7 @@ func queryPidWithWinServiceName(winServiceName string) (uint32, error) { return p.ProcessId, nil } -func collectMemmap(Process, string, map[string]any) {} +func collectMemmap(process, string, map[string]any) {} func findBySystemdUnits([]string) 
([]processGroup, error) { return nil, nil @@ -71,13 +71,13 @@ func findByWindowsServices(services []string) ([]processGroup, error) { return nil, fmt.Errorf("failed to query PID of service %q: %w", service, err) } - p, err := process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil { return nil, fmt.Errorf("failed to find process for PID %d of service %q: %w", pid, service, err) } groups = append(groups, processGroup{ - processes: []*process.Process{p}, + processes: []*gopsprocess.Process{p}, tags: map[string]string{"win_service": service}, }) } @@ -85,11 +85,11 @@ func findByWindowsServices(services []string) ([]processGroup, error) { return groups, nil } -func collectTotalReadWrite(Process) (r, w uint64, err error) { +func collectTotalReadWrite(process) (r, w uint64, err error) { return 0, 0, errors.ErrUnsupported } -func statsTCP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { +func statsTCP(conns []gopsnet.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -122,7 +122,7 @@ func statsTCP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, er return fieldslist, nil } -func statsUDP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { +func statsUDP(conns []gopsnet.ConnectionStat, _ uint8) ([]map[string]interface{}, error) { if len(conns) == 0 { return nil, nil } @@ -155,6 +155,6 @@ func statsUDP(conns []net.ConnectionStat, _ uint8) ([]map[string]interface{}, er return fieldslist, nil } -func statsUnix([]net.ConnectionStat) ([]map[string]interface{}, error) { +func statsUnix([]gopsnet.ConnectionStat) ([]map[string]interface{}, error) { return nil, nil } diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 8451210e94530..add3a2dfb120d 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -11,54 +11,54 @@ import ( ) // Implementation of PIDGatherer that 
execs pgrep to find processes -type Pgrep struct { +type pgrep struct { path string } -func newPgrepFinder() (PIDFinder, error) { +func newPgrepFinder() (pidFinder, error) { path, err := exec.LookPath("pgrep") if err != nil { return nil, fmt.Errorf("could not find pgrep binary: %w", err) } - return &Pgrep{path}, nil + return &pgrep{path}, nil } -func (pg *Pgrep) PidFile(path string) ([]PID, error) { - var pids []PID +func (pg *pgrep) pidFile(path string) ([]pid, error) { + var pids []pid pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("failed to read pidfile %q: %w", path, err) } - pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) + processID, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) if err != nil { return pids, err } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) return pids, nil } -func (pg *Pgrep) Pattern(pattern string) ([]PID, error) { +func (pg *pgrep) pattern(pattern string) ([]pid, error) { args := []string{pattern} return pg.find(args) } -func (pg *Pgrep) UID(user string) ([]PID, error) { +func (pg *pgrep) uid(user string) ([]pid, error) { args := []string{"-u", user} return pg.find(args) } -func (pg *Pgrep) FullPattern(pattern string) ([]PID, error) { +func (pg *pgrep) fullPattern(pattern string) ([]pid, error) { args := []string{"-f", pattern} return pg.find(args) } -func (pg *Pgrep) Children(pid PID) ([]PID, error) { +func (pg *pgrep) children(pid pid) ([]pid, error) { args := []string{"-P", strconv.FormatInt(int64(pid), 10)} return pg.find(args) } -func (pg *Pgrep) find(args []string) ([]PID, error) { +func (pg *pgrep) find(args []string) ([]pid, error) { // Execute pgrep with the given arguments buf, err := exec.Command(pg.path, args...).Output() if err != nil { @@ -73,13 +73,13 @@ func (pg *Pgrep) find(args []string) ([]PID, error) { // Parse the command output to extract the PIDs fields := strings.Fields(out) - pids := make([]PID, 0, len(fields)) 
+ pids := make([]pid, 0, len(fields)) for _, field := range fields { - pid, err := strconv.ParseInt(field, 10, 32) + processID, err := strconv.ParseInt(field, 10, 32) if err != nil { return nil, err } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) } return pids, nil } diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index a0e8e60c880f0..c5eeb831d8b73 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -9,41 +9,41 @@ import ( "time" gopsnet "github.com/shirou/gopsutil/v4/net" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" ) -type Process interface { - PID() PID +type process interface { Name() (string, error) - SetTag(string, string) - MemoryMaps(bool) (*[]process.MemoryMapsStat, error) - Metrics(string, *collectionConfig, time.Time) ([]telegraf.Metric, error) + MemoryMaps(bool) (*[]gopsprocess.MemoryMapsStat, error) + pid() pid + setTag(string, string) + metrics(string, *collectionConfig, time.Time) ([]telegraf.Metric, error) } -type PIDFinder interface { - PidFile(path string) ([]PID, error) - Pattern(pattern string) ([]PID, error) - UID(user string) ([]PID, error) - FullPattern(path string) ([]PID, error) - Children(pid PID) ([]PID, error) +type pidFinder interface { + pidFile(path string) ([]pid, error) + pattern(pattern string) ([]pid, error) + uid(user string) ([]pid, error) + fullPattern(path string) ([]pid, error) + children(pid pid) ([]pid, error) } -type Proc struct { +type proc struct { hasCPUTimes bool tags map[string]string - *process.Process + *gopsprocess.Process } -func newProc(pid PID) (Process, error) { - p, err := process.NewProcess(int32(pid)) +func newProc(pid pid) (process, error) { + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil { return nil, err } - proc := &Proc{ + proc := &proc{ Process: p, hasCPUTimes: false, 
tags: make(map[string]string), @@ -51,15 +51,15 @@ func newProc(pid PID) (Process, error) { return proc, nil } -func (p *Proc) PID() PID { - return PID(p.Process.Pid) +func (p *proc) pid() pid { + return pid(p.Process.Pid) } -func (p *Proc) SetTag(k, v string) { +func (p *proc) setTag(k, v string) { p.tags[k] = v } -func (p *Proc) percent(_ time.Duration) (float64, error) { +func (p *proc) percent(_ time.Duration) (float64, error) { cpuPerc, err := p.Process.Percent(time.Duration(0)) if !p.hasCPUTimes && err == nil { p.hasCPUTimes = true @@ -68,8 +68,8 @@ func (p *Proc) percent(_ time.Duration) (float64, error) { return cpuPerc, err } -// Add metrics a single Process -func (p *Proc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([]telegraf.Metric, error) { +// Add metrics a single process +func (p *proc) metrics(prefix string, cfg *collectionConfig, t time.Time) ([]telegraf.Metric, error) { if prefix != "" { prefix += "_" } @@ -163,27 +163,27 @@ func (p *Proc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([]tel for _, rlim := range rlims { var name string switch rlim.Resource { - case process.RLIMIT_CPU: + case gopsprocess.RLIMIT_CPU: name = "cpu_time" - case process.RLIMIT_DATA: + case gopsprocess.RLIMIT_DATA: name = "memory_data" - case process.RLIMIT_STACK: + case gopsprocess.RLIMIT_STACK: name = "memory_stack" - case process.RLIMIT_RSS: + case gopsprocess.RLIMIT_RSS: name = "memory_rss" - case process.RLIMIT_NOFILE: + case gopsprocess.RLIMIT_NOFILE: name = "num_fds" - case process.RLIMIT_MEMLOCK: + case gopsprocess.RLIMIT_MEMLOCK: name = "memory_locked" - case process.RLIMIT_AS: + case gopsprocess.RLIMIT_AS: name = "memory_vms" - case process.RLIMIT_LOCKS: + case gopsprocess.RLIMIT_LOCKS: name = "file_locks" - case process.RLIMIT_SIGPENDING: + case gopsprocess.RLIMIT_SIGPENDING: name = "signals_pending" - case process.RLIMIT_NICE: + case gopsprocess.RLIMIT_NICE: name = "nice_priority" - case process.RLIMIT_RTPRIO: + case 
gopsprocess.RLIMIT_RTPRIO: name = "realtime_priority" default: continue diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 4e3e4df6d38c0..6bf1e8402dc69 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -15,7 +15,7 @@ import ( "strings" "time" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/choice" @@ -28,14 +28,7 @@ var sampleConfig string // execCommand is so tests can mock out exec.Command usage. var execCommand = exec.Command -type PID int32 - -type collectionConfig struct { - solarisMode bool - tagging map[string]bool - features map[string]bool - socketProtos []string -} +type pid int32 type Procstat struct { PidFinder string `toml:"pid_finder"` @@ -57,24 +50,31 @@ type Procstat struct { Properties []string `toml:"properties"` SocketProtocols []string `toml:"socket_protocols"` TagWith []string `toml:"tag_with"` - Filter []Filter `toml:"filter"` + Filter []filter `toml:"filter"` Log telegraf.Logger `toml:"-"` - finder PIDFinder - processes map[PID]Process + finder pidFinder + processes map[pid]process cfg collectionConfig oldMode bool - createProcess func(PID) (Process, error) + createProcess func(pid) (process, error) +} + +type collectionConfig struct { + solarisMode bool + tagging map[string]bool + features map[string]bool + socketProtos []string } -type PidsTags struct { - PIDs []PID +type pidsTags struct { + PIDs []pid Tags map[string]string } type processGroup struct { - processes []*process.Process + processes []*gopsprocess.Process tags map[string]string } @@ -196,14 +196,14 @@ func (p *Procstat) Init() error { // New-style operations for i := range p.Filter { p.Filter[i].Log = p.Log - if err := p.Filter[i].Init(); err != nil { + if err := p.Filter[i].init(); err != nil { return fmt.Errorf("initializing filter %d failed: %w", i, err) } } } // 
Initialize the running process cache - p.processes = make(map[PID]Process) + p.processes = make(map[pid]process) return nil } @@ -240,7 +240,7 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error { } var count int - running := make(map[PID]bool) + running := make(map[pid]bool) for _, r := range results { if len(r.PIDs) < 1 && len(p.SupervisorUnits) > 0 { continue @@ -271,16 +271,16 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error { // Add initial tags for k, v := range r.Tags { - proc.SetTag(k, v) + proc.setTag(k, v) } if p.ProcessName != "" { - proc.SetTag("process_name", p.ProcessName) + proc.setTag("process_name", p.ProcessName) } p.processes[pid] = proc } running[pid] = true - metrics, err := proc.Metrics(p.Prefix, &p.cfg, now) + metrics, err := proc.metrics(p.Prefix, &p.cfg, now) if err != nil { // Continue after logging an error as there might still be // metrics available @@ -324,9 +324,9 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error { func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { now := time.Now() - running := make(map[PID]bool) + running := make(map[pid]bool) for _, f := range p.Filter { - groups, err := f.ApplyFilter() + groups, err := f.applyFilter() if err != nil { // Add lookup error-metric acc.AddFields( @@ -357,8 +357,8 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { // Use the cached processes as we need the existing instances // to compute delta-metrics (e.g. cpu-usage). 
- pid := PID(gp.Pid) - proc, found := p.processes[pid] + pid := pid(gp.Pid) + process, found := p.processes[pid] if !found { //nolint:errcheck // Assumption: if a process has no name, it probably does not exist if name, _ := gp.Name(); name == "" { @@ -372,19 +372,19 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { tags[k] = v } if p.ProcessName != "" { - proc.SetTag("process_name", p.ProcessName) + process.setTag("process_name", p.ProcessName) } tags["filter"] = f.Name - proc = &Proc{ + process = &proc{ Process: gp, hasCPUTimes: false, tags: tags, } - p.processes[pid] = proc + p.processes[pid] = process } running[pid] = true - metrics, err := proc.Metrics(p.Prefix, &p.cfg, now) + metrics, err := process.metrics(p.Prefix, &p.cfg, now) if err != nil { // Continue after logging an error as there might still be // metrics available @@ -422,7 +422,7 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error { } // Get matching PIDs and their initial tags -func (p *Procstat) findPids() ([]PidsTags, error) { +func (p *Procstat) findPids() ([]pidsTags, error) { switch { case len(p.SupervisorUnits) > 0: return p.findSupervisorUnits() @@ -434,65 +434,65 @@ func (p *Procstat) findPids() ([]PidsTags, error) { return nil, err } tags := map[string]string{"win_service": p.WinService} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil case p.CGroup != "": return p.cgroupPIDs() case p.PidFile != "": - pids, err := p.finder.PidFile(p.PidFile) + pids, err := p.finder.pidFile(p.PidFile) if err != nil { return nil, err } tags := map[string]string{"pidfile": p.PidFile} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil case p.Exe != "": - pids, err := p.finder.Pattern(p.Exe) + pids, err := p.finder.pattern(p.Exe) if err != nil { return nil, err } tags := map[string]string{"exe": p.Exe} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil case p.Pattern != "": - pids, err := 
p.finder.FullPattern(p.Pattern) + pids, err := p.finder.fullPattern(p.Pattern) if err != nil { return nil, err } tags := map[string]string{"pattern": p.Pattern} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil case p.User != "": - pids, err := p.finder.UID(p.User) + pids, err := p.finder.uid(p.User) if err != nil { return nil, err } tags := map[string]string{"user": p.User} - return []PidsTags{{pids, tags}}, nil + return []pidsTags{{pids, tags}}, nil } return nil, errors.New("no filter option set") } -func (p *Procstat) findSupervisorUnits() ([]PidsTags, error) { +func (p *Procstat) findSupervisorUnits() ([]pidsTags, error) { groups, groupsTags, err := p.supervisorPIDs() if err != nil { return nil, fmt.Errorf("getting supervisor PIDs failed: %w", err) } // According to the PID, find the system process number and get the child processes - pidTags := make([]PidsTags, 0, len(groups)) + pidTags := make([]pidsTags, 0, len(groups)) for _, group := range groups { grppid := groupsTags[group]["pid"] if grppid == "" { - pidTags = append(pidTags, PidsTags{nil, groupsTags[group]}) + pidTags = append(pidTags, pidsTags{nil, groupsTags[group]}) continue } - pid, err := strconv.ParseInt(grppid, 10, 32) + processID, err := strconv.ParseInt(grppid, 10, 32) if err != nil { return nil, fmt.Errorf("converting PID %q failed: %w", grppid, err) } // Get all children of the supervisor unit - pids, err := p.finder.Children(PID(pid)) + pids, err := p.finder.children(pid(processID)) if err != nil { - return nil, fmt.Errorf("getting children for %d failed: %w", pid, err) + return nil, fmt.Errorf("getting children for %d failed: %w", processID, err) } tags := map[string]string{"pattern": p.Pattern, "parent_pid": p.Pattern} @@ -510,7 +510,7 @@ func (p *Procstat) findSupervisorUnits() ([]PidsTags, error) { } // Remove duplicate pid tags delete(tags, "pid") - pidTags = append(pidTags, PidsTags{pids, tags}) + pidTags = append(pidTags, pidsTags{pids, tags}) } return 
pidTags, nil } @@ -559,30 +559,30 @@ func (p *Procstat) supervisorPIDs() ([]string, map[string]map[string]string, err return p.SupervisorUnits, mainPids, nil } -func (p *Procstat) systemdUnitPIDs() ([]PidsTags, error) { +func (p *Procstat) systemdUnitPIDs() ([]pidsTags, error) { if p.IncludeSystemdChildren { p.CGroup = "systemd/system.slice/" + p.SystemdUnit return p.cgroupPIDs() } - var pidTags []PidsTags + var pidTags []pidsTags pids, err := p.simpleSystemdUnitPIDs() if err != nil { return nil, err } tags := map[string]string{"systemd_unit": p.SystemdUnit} - pidTags = append(pidTags, PidsTags{pids, tags}) + pidTags = append(pidTags, pidsTags{pids, tags}) return pidTags, nil } -func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { +func (p *Procstat) simpleSystemdUnitPIDs() ([]pid, error) { out, err := execCommand("systemctl", "show", p.SystemdUnit).Output() if err != nil { return nil, err } lines := bytes.Split(out, []byte{'\n'}) - pids := make([]PID, 0, len(lines)) + pids := make([]pid, 0, len(lines)) for _, line := range lines { kv := bytes.SplitN(line, []byte{'='}, 2) if len(kv) != 2 { @@ -594,17 +594,17 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { if len(kv[1]) == 0 || bytes.Equal(kv[1], []byte("0")) { return nil, nil } - pid, err := strconv.ParseInt(string(kv[1]), 10, 32) + processID, err := strconv.ParseInt(string(kv[1]), 10, 32) if err != nil { return nil, fmt.Errorf("invalid pid %q", kv[1]) } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) } return pids, nil } -func (p *Procstat) cgroupPIDs() ([]PidsTags, error) { +func (p *Procstat) cgroupPIDs() ([]pidsTags, error) { procsPath := p.CGroup if procsPath[0] != '/' { procsPath = "/sys/fs/cgroup/" + procsPath @@ -615,20 +615,20 @@ func (p *Procstat) cgroupPIDs() ([]PidsTags, error) { return nil, fmt.Errorf("glob failed: %w", err) } - pidTags := make([]PidsTags, 0, len(items)) + pidTags := make([]pidsTags, 0, len(items)) for _, item := range items { pids, err := 
p.singleCgroupPIDs(item) if err != nil { return nil, err } tags := map[string]string{"cgroup": p.CGroup, "cgroup_full": item} - pidTags = append(pidTags, PidsTags{pids, tags}) + pidTags = append(pidTags, pidsTags{pids, tags}) } return pidTags, nil } -func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { +func (p *Procstat) singleCgroupPIDs(path string) ([]pid, error) { ok, err := isDir(path) if err != nil { return nil, err @@ -643,16 +643,16 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { } lines := bytes.Split(out, []byte{'\n'}) - pids := make([]PID, 0, len(lines)) + pids := make([]pid, 0, len(lines)) for _, pidBS := range lines { if len(pidBS) == 0 { continue } - pid, err := strconv.ParseInt(string(pidBS), 10, 32) + processID, err := strconv.ParseInt(string(pidBS), 10, 32) if err != nil { return nil, fmt.Errorf("invalid pid %q", pidBS) } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) } return pids, nil @@ -666,15 +666,15 @@ func isDir(path string) (bool, error) { return result.IsDir(), nil } -func (p *Procstat) winServicePIDs() ([]PID, error) { - var pids []PID +func (p *Procstat) winServicePIDs() ([]pid, error) { + var pids []pid - pid, err := queryPidWithWinServiceName(p.WinService) + processID, err := queryPidWithWinServiceName(p.WinService) if err != nil { return pids, err } - pids = append(pids, PID(pid)) + pids = append(pids, pid(processID)) return pids, nil } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index aa833a86f9b24..4256f08e24234 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/shirou/gopsutil/v4/process" + gopsprocess "github.com/shirou/gopsutil/v4/process" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -77,73 +77,69 @@ TestGather_STARTINGsupervisorUnitPIDs STARTING`) } type testPgrep struct { - pids []PID 
+ pids []pid err error } -func newTestFinder(pids []PID) PIDFinder { +func newTestFinder(pids []pid) pidFinder { return &testPgrep{ pids: pids, err: nil, } } -func (pg *testPgrep) PidFile(_ string) ([]PID, error) { +func (pg *testPgrep) pidFile(_ string) ([]pid, error) { return pg.pids, pg.err } -func (p *testProc) Cmdline() (string, error) { - return "test_proc", nil -} - -func (pg *testPgrep) Pattern(_ string) ([]PID, error) { +func (pg *testPgrep) pattern(_ string) ([]pid, error) { return pg.pids, pg.err } -func (pg *testPgrep) UID(_ string) ([]PID, error) { +func (pg *testPgrep) uid(_ string) ([]pid, error) { return pg.pids, pg.err } -func (pg *testPgrep) FullPattern(_ string) ([]PID, error) { +func (pg *testPgrep) fullPattern(_ string) ([]pid, error) { return pg.pids, pg.err } -func (pg *testPgrep) Children(_ PID) ([]PID, error) { - pids := []PID{7311, 8111, 8112} +func (pg *testPgrep) children(_ pid) ([]pid, error) { + pids := []pid{7311, 8111, 8112} return pids, pg.err } type testProc struct { - pid PID - tags map[string]string + procID pid + tags map[string]string } -func newTestProc(pid PID) (Process, error) { +func newTestProc(pid pid) (process, error) { proc := &testProc{ - pid: pid, - tags: make(map[string]string), + procID: pid, + tags: make(map[string]string), } return proc, nil } -func (p *testProc) PID() PID { - return p.pid +func (p *testProc) pid() pid { + return p.procID } func (p *testProc) Name() (string, error) { return "test_proc", nil } -func (p *testProc) SetTag(k, v string) { +func (p *testProc) setTag(k, v string) { p.tags[k] = v } -func (p *testProc) MemoryMaps(bool) (*[]process.MemoryMapsStat, error) { - stats := make([]process.MemoryMapsStat, 0) +func (p *testProc) MemoryMaps(bool) (*[]gopsprocess.MemoryMapsStat, error) { + stats := make([]gopsprocess.MemoryMapsStat, 0) return &stats, nil } -func (p *testProc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([]telegraf.Metric, error) { +func (p *testProc) metrics(prefix 
string, cfg *collectionConfig, t time.Time) ([]telegraf.Metric, error) { if prefix != "" { prefix += "_" } @@ -190,9 +186,9 @@ func (p *testProc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([ } if cfg.tagging["pid"] { - tags["pid"] = strconv.Itoa(int(p.pid)) + tags["pid"] = strconv.Itoa(int(p.procID)) } else { - fields["pid"] = int32(p.pid) + fields["pid"] = int32(p.procID) } if cfg.tagging["ppid"] { @@ -216,7 +212,7 @@ func (p *testProc) Metrics(prefix string, cfg *collectionConfig, t time.Time) ([ return []telegraf.Metric{metric.New("procstat", tags, fields, t)}, nil } -var pid = PID(42) +var processID = pid(42) var exe = "foo" func TestInitInvalidFinder(t *testing.T) { @@ -277,8 +273,8 @@ func TestGather_CreateProcessErrorOk(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), - createProcess: func(PID) (Process, error) { + finder: newTestFinder([]pid{processID}), + createProcess: func(pid) (process, error) { return nil, errors.New("createProcess error") }, } @@ -350,7 +346,7 @@ func TestGather_ProcessName(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -362,14 +358,14 @@ func TestGather_ProcessName(t *testing.T) { } func TestGather_NoProcessNameUsesReal(t *testing.T) { - pid := PID(os.Getpid()) + processID := pid(os.Getpid()) p := Procstat{ Exe: exe, PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -386,7 +382,7 @@ func TestGather_NoPidTag(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + 
finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -405,7 +401,7 @@ func TestGather_PidTag(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -424,7 +420,7 @@ func TestGather_Prefix(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -441,7 +437,7 @@ func TestGather_Exe(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -460,7 +456,7 @@ func TestGather_User(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -479,7 +475,7 @@ func TestGather_Pattern(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -498,7 +494,7 @@ func TestGather_PidFile(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -510,7 +506,7 @@ func TestGather_PidFile(t *testing.T) { } func TestGather_PercentFirstPass(t *testing.T) { - pid := PID(os.Getpid()) + processID := pid(os.Getpid()) p := 
Procstat{ Pattern: "foo", @@ -518,7 +514,7 @@ func TestGather_PercentFirstPass(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newProc, } require.NoError(t, p.Init()) @@ -531,7 +527,7 @@ func TestGather_PercentFirstPass(t *testing.T) { } func TestGather_PercentSecondPass(t *testing.T) { - pid := PID(os.Getpid()) + processID := pid(os.Getpid()) p := Procstat{ Pattern: "foo", @@ -539,7 +535,7 @@ func TestGather_PercentSecondPass(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newProc, } require.NoError(t, p.Init()) @@ -558,7 +554,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), } require.NoError(t, p.Init()) @@ -566,7 +562,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) { require.NoError(t, err) for _, pidsTag := range pidsTags { - require.Equal(t, []PID{11408}, pidsTag.PIDs) + require.Equal(t, []pid{11408}, pidsTag.PIDs) require.Equal(t, "TestGather_systemdUnitPIDs", pidsTag.Tags["systemd_unit"]) } } @@ -585,14 +581,14 @@ func TestGather_cgroupPIDs(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), } require.NoError(t, p.Init()) pidsTags, err := p.findPids() require.NoError(t, err) for _, pidsTag := range pidsTags { - require.Equal(t, []PID{1234, 5678}, pidsTag.PIDs) + require.Equal(t, []pid{1234, 5678}, pidsTag.PIDs) require.Equal(t, td, pidsTag.Tags["cgroup"]) } } @@ -603,7 +599,7 @@ func TestProcstatLookupMetric(t *testing.T) { PidFinder: "test", 
Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{543}), + finder: newTestFinder([]pid{543}), createProcess: newProc, } require.NoError(t, p.Init()) @@ -621,7 +617,7 @@ func TestGather_SameTimestamps(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), createProcess: newTestProc, } require.NoError(t, p.Init()) @@ -641,14 +637,14 @@ func TestGather_supervisorUnitPIDs(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), } require.NoError(t, p.Init()) pidsTags, err := p.findPids() require.NoError(t, err) for _, pidsTag := range pidsTags { - require.Equal(t, []PID{7311, 8111, 8112}, pidsTag.PIDs) + require.Equal(t, []pid{7311, 8111, 8112}, pidsTag.PIDs) require.Equal(t, "TestGather_supervisorUnitPIDs", pidsTag.Tags["supervisor_unit"]) } } @@ -659,7 +655,7 @@ func TestGather_MoresupervisorUnitPIDs(t *testing.T) { PidFinder: "test", Properties: []string{"cpu", "memory", "mmap"}, Log: testutil.Logger{}, - finder: newTestFinder([]PID{pid}), + finder: newTestFinder([]pid{processID}), } require.NoError(t, p.Init()) diff --git a/plugins/inputs/procstat/service_finders.go b/plugins/inputs/procstat/service_finders.go index 169c64f70957c..df9bc039d326f 100644 --- a/plugins/inputs/procstat/service_finders.go +++ b/plugins/inputs/procstat/service_finders.go @@ -8,8 +8,9 @@ import ( "strconv" "strings" + gopsprocess "github.com/shirou/gopsutil/v4/process" + "github.com/influxdata/telegraf" - "github.com/shirou/gopsutil/v4/process" ) type processFinder struct { @@ -36,13 +37,13 @@ func (f *processFinder) findByPidFiles(paths []string) ([]processGroup, error) { return nil, fmt.Errorf("failed to parse PID in file %q: %w", path, err) } - p, err := 
process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil && !f.errPidFiles[path] { f.log.Errorf("failed to find process for PID %d of file %q: %v", pid, path, err) f.errPidFiles[path] = true } groups = append(groups, processGroup{ - processes: []*process.Process{p}, + processes: []*gopsprocess.Process{p}, tags: map[string]string{"pidfile": path}, }) } @@ -76,7 +77,7 @@ func findByCgroups(cgroups []string) ([]processGroup, error) { return nil, err } lines := bytes.Split(buf, []byte{'\n'}) - procs := make([]*process.Process, 0, len(lines)) + procs := make([]*gopsprocess.Process, 0, len(lines)) for _, l := range lines { l := strings.TrimSpace(string(l)) if len(l) == 0 { @@ -86,7 +87,7 @@ func findByCgroups(cgroups []string) ([]processGroup, error) { if err != nil { return nil, fmt.Errorf("failed to parse PID %q in file %q", l, fpath) } - p, err := process.NewProcess(int32(pid)) + p, err := gopsprocess.NewProcess(int32(pid)) if err != nil { return nil, fmt.Errorf("failed to find process for PID %d of %q: %w", pid, fpath, err) } @@ -130,7 +131,7 @@ func findBySupervisorUnits(units string) ([]processGroup, error) { "status": status, } - var procs []*process.Process + var procs []*gopsprocess.Process switch status { case "FATAL", "EXITED", "BACKOFF", "STOPPING": tags["error"] = strings.Join(kv[2:], " ") @@ -141,7 +142,7 @@ func findBySupervisorUnits(units string) ([]processGroup, error) { if err != nil { return nil, fmt.Errorf("failed to parse group PID %q: %w", rawpid, err) } - p, err := process.NewProcess(int32(grouppid)) + p, err := gopsprocess.NewProcess(int32(grouppid)) if err != nil { return nil, fmt.Errorf("failed to find process for PID %d of unit %q: %w", grouppid, name, err) } diff --git a/plugins/inputs/prometheus/consul.go b/plugins/inputs/prometheus/consul.go index 431e3231996e0..9020929f598ab 100644 --- a/plugins/inputs/prometheus/consul.go +++ b/plugins/inputs/prometheus/consul.go @@ -14,17 +14,17 @@ import ( 
"github.com/influxdata/telegraf/config" ) -type ConsulConfig struct { +type consulConfig struct { // Address of the Consul agent. The address must contain a hostname or an IP address // and optionally a port (format: "host:port"). Enabled bool `toml:"enabled"` Agent string `toml:"agent"` QueryInterval config.Duration `toml:"query_interval"` - Queries []*ConsulQuery `toml:"query"` + Queries []*consulQuery `toml:"query"` } // One Consul service discovery query -type ConsulQuery struct { +type consulQuery struct { // A name of the searched services (not ID) ServiceName string `toml:"name"` @@ -128,7 +128,7 @@ func (p *Prometheus) startConsul(ctx context.Context) error { } func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { - consulServiceURLs := make(map[string]URLAndAddress) + consulServiceURLs := make(map[string]urlAndAddress) p.Log.Debugf("Refreshing Consul services") @@ -165,8 +165,8 @@ func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { p.Log.Infof("Created scrape URLs from Consul for Service (%s, %s)", q.ServiceName, q.ServiceTag) } q.lastQueryFailed = false - p.Log.Debugf("Adding scrape URL from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, uaa.URL.String()) - consulServiceURLs[uaa.URL.String()] = *uaa + p.Log.Debugf("Adding scrape URL from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, uaa.url.String()) + consulServiceURLs[uaa.url.String()] = *uaa } } @@ -177,7 +177,7 @@ func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { return nil } -func (p *Prometheus) getConsulServiceURL(q *ConsulQuery, s *api.CatalogService) (*URLAndAddress, error) { +func (p *Prometheus) getConsulServiceURL(q *consulQuery, s *api.CatalogService) (*urlAndAddress, error) { var buffer bytes.Buffer buffer.Reset() err := q.serviceURLTemplate.Execute(&buffer, s) @@ -201,9 +201,9 @@ func (p *Prometheus) getConsulServiceURL(q *ConsulQuery, s *api.CatalogService) p.Log.Debugf("Will scrape metrics from Consul Service 
%s", serviceURL.String()) - return &URLAndAddress{ - URL: serviceURL, - OriginalURL: serviceURL, - Tags: extraTags, + return &urlAndAddress{ + url: serviceURL, + originalURL: serviceURL, + tags: extraTags, }, nil } diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index eefe5a215a8cf..2c4ef136c18ca 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -124,11 +124,11 @@ func shouldScrapePod(pod *corev1.Pod, p *Prometheus) bool { var shouldScrape bool switch p.MonitorKubernetesPodsMethod { - case MonitorMethodAnnotations: // must have 'true' annotation to be scraped + case monitorMethodAnnotations: // must have 'true' annotation to be scraped shouldScrape = pod.Annotations != nil && pod.Annotations["prometheus.io/scrape"] == "true" - case MonitorMethodSettings: // will be scraped regardless of annotation + case monitorMethodSettings: // will be scraped regardless of annotation shouldScrape = true - case MonitorMethodSettingsAndAnnotations: // will be scraped unless opts out with 'false' annotation + case monitorMethodSettingsAndAnnotations: // will be scraped unless opts out with 'false' annotation shouldScrape = pod.Annotations == nil || pod.Annotations["prometheus.io/scrape"] != "false" } @@ -194,7 +194,7 @@ func (p *Prometheus) watchPod(ctx context.Context, clientset *kubernetes.Clients if err != nil { p.Log.Errorf("getting key from cache %s", err.Error()) } - podID := PodID(key) + podID := podID(key) if shouldScrapePod(newPod, p) { // When Informers re-Lists, pod might already be registered, // do nothing if it is, register otherwise @@ -209,7 +209,7 @@ func (p *Prometheus) watchPod(ctx context.Context, clientset *kubernetes.Clients DeleteFunc: func(oldObj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(oldObj) if err == nil { - unregisterPod(PodID(key), p) + unregisterPod(podID(key), p) } }, }) @@ -280,7 +280,7 @@ func updateCadvisorPodList(p 
*Prometheus, req *http.Request) error { // Updating pod list to be latest cadvisor response p.lock.Lock() - p.kubernetesPods = make(map[PodID]URLAndAddress) + p.kubernetesPods = make(map[podID]urlAndAddress) // Register pod only if it has an annotation to scrape, if it is ready, // and if namespace and selectors are specified and match @@ -419,7 +419,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { tags[k] = v } } - podURL := p.AddressToURL(targetURL, targetURL.Hostname()) + podURL := p.addressToURL(targetURL, targetURL.Hostname()) // Locks earlier if using cAdvisor calls - makes a new list each time // rather than updating and removing from the same list @@ -427,12 +427,12 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { p.lock.Lock() defer p.lock.Unlock() } - p.kubernetesPods[PodID(pod.GetNamespace()+"/"+pod.GetName())] = URLAndAddress{ - URL: podURL, - Address: targetURL.Hostname(), - OriginalURL: targetURL, - Tags: tags, - Namespace: pod.GetNamespace(), + p.kubernetesPods[podID(pod.GetNamespace()+"/"+pod.GetName())] = urlAndAddress{ + url: podURL, + address: targetURL.Hostname(), + originalURL: targetURL, + tags: tags, + namespace: pod.GetNamespace(), } } @@ -446,15 +446,15 @@ func getScrapeURL(pod *corev1.Pod, p *Prometheus) (*url.URL, error) { var scheme, pathAndQuery, port string - if p.MonitorKubernetesPodsMethod == MonitorMethodSettings || - p.MonitorKubernetesPodsMethod == MonitorMethodSettingsAndAnnotations { + if p.MonitorKubernetesPodsMethod == monitorMethodSettings || + p.MonitorKubernetesPodsMethod == monitorMethodSettingsAndAnnotations { scheme = p.MonitorKubernetesPodsScheme pathAndQuery = p.MonitorKubernetesPodsPath port = strconv.Itoa(p.MonitorKubernetesPodsPort) } - if p.MonitorKubernetesPodsMethod == MonitorMethodAnnotations || - p.MonitorKubernetesPodsMethod == MonitorMethodSettingsAndAnnotations { + if p.MonitorKubernetesPodsMethod == monitorMethodAnnotations || + p.MonitorKubernetesPodsMethod == 
monitorMethodSettingsAndAnnotations { if ann := pod.Annotations["prometheus.io/scheme"]; ann != "" { scheme = ann } @@ -489,12 +489,12 @@ func getScrapeURL(pod *corev1.Pod, p *Prometheus) (*url.URL, error) { return base, nil } -func unregisterPod(podID PodID, p *Prometheus) { +func unregisterPod(podID podID, p *Prometheus) { p.lock.Lock() defer p.lock.Unlock() if v, ok := p.kubernetesPods[podID]; ok { p.Log.Debugf("registered a delete request for %s", podID) delete(p.kubernetesPods, podID) - p.Log.Debugf("will stop scraping for %q", v.URL.String()) + p.Log.Debugf("will stop scraping for %q", v.url.String()) } } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 98be067b395d1..5e2e2e3ca8cfb 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -18,8 +18,8 @@ func initPrometheus() *Prometheus { prom.MonitorKubernetesPodsScheme = "http" prom.MonitorKubernetesPodsPort = 9102 prom.MonitorKubernetesPodsPath = "/metrics" - prom.MonitorKubernetesPodsMethod = MonitorMethodAnnotations - prom.kubernetesPods = map[PodID]URLAndAddress{} + prom.MonitorKubernetesPodsMethod = monitorMethodAnnotations + prom.kubernetesPods = map[podID]urlAndAddress{} return prom } @@ -34,7 +34,7 @@ func TestScrapeURLNoAnnotations(t *testing.T) { func TestScrapeURLNoAnnotationsScrapeConfig(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations p := pod() p.Annotations = map[string]string{} @@ -45,7 +45,7 @@ func TestScrapeURLNoAnnotationsScrapeConfig(t *testing.T) { func TestScrapeURLScrapeConfigCustom(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations prom.MonitorKubernetesPodsScheme = "https" 
prom.MonitorKubernetesPodsPort = 9999 @@ -66,7 +66,7 @@ func TestScrapeURLAnnotations(t *testing.T) { func TestScrapeURLAnnotationsScrapeConfig(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations p := pod() url, err := getScrapeURL(p, prom) require.NoError(t, err) @@ -84,7 +84,7 @@ func TestScrapeURLAnnotationsCustomPort(t *testing.T) { func TestScrapeURLAnnotationsCustomPortScrapeConfig(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations p := pod() p.Annotations = map[string]string{"prometheus.io/port": "9000"} url, err := getScrapeURL(p, prom) @@ -129,7 +129,7 @@ func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) { } func TestAddPod(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -139,7 +139,7 @@ func TestAddPod(t *testing.T) { func TestAddPodScrapeConfig(t *testing.T) { prom := initPrometheus() - prom.MonitorKubernetesPodsMethod = MonitorMethodSettingsAndAnnotations + prom.MonitorKubernetesPodsMethod = monitorMethodSettingsAndAnnotations p := pod() p.Annotations = map[string]string{} @@ -148,7 +148,7 @@ func TestAddPodScrapeConfig(t *testing.T) { } func TestAddMultipleDuplicatePods(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -156,13 +156,13 @@ func TestAddMultipleDuplicatePods(t *testing.T) { p.Name = "Pod2" registerPod(p, prom) - 
urls, err := prom.GetAllURLs() + urls, err := prom.getAllURLs() require.NoError(t, err) require.Len(t, urls, 1) } func TestAddMultiplePods(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -174,41 +174,41 @@ func TestAddMultiplePods(t *testing.T) { } func TestDeletePods(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - podID, err := cache.MetaNamespaceKeyFunc(p) + id, err := cache.MetaNamespaceKeyFunc(p) require.NoError(t, err) - unregisterPod(PodID(podID), prom) + unregisterPod(podID(id), prom) require.Empty(t, prom.kubernetesPods) } func TestKeepDefaultNamespaceLabelName(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - podID, err := cache.MetaNamespaceKeyFunc(p) + id, err := cache.MetaNamespaceKeyFunc(p) require.NoError(t, err) - tags := prom.kubernetesPods[PodID(podID)].Tags + tags := prom.kubernetesPods[podID(id)].tags require.Equal(t, "default", tags["namespace"]) } func TestChangeNamespaceLabelName(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, PodNamespaceLabelName: "pod_namespace", kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, PodNamespaceLabelName: "pod_namespace", kubernetesPods: map[podID]urlAndAddress{}} p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, 
prom) - podID, err := cache.MetaNamespaceKeyFunc(p) + id, err := cache.MetaNamespaceKeyFunc(p) require.NoError(t, err) - tags := prom.kubernetesPods[PodID(podID)].Tags + tags := prom.kubernetesPods[podID(id)].tags require.Equal(t, "default", tags["pod_namespace"]) require.Equal(t, "", tags["namespace"]) } @@ -300,14 +300,14 @@ func TestAnnotationFilters(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} prom.PodAnnotationInclude = tc.include prom.PodAnnotationExclude = tc.exclude require.NoError(t, prom.initFilters()) registerPod(p, prom) for _, pd := range prom.kubernetesPods { for _, tagKey := range tc.expectedTags { - require.Contains(t, pd.Tags, tagKey) + require.Contains(t, pd.tags, tagKey) } } }) @@ -345,14 +345,14 @@ func TestLabelFilters(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[PodID]URLAndAddress{}} + prom := &Prometheus{Log: testutil.Logger{}, kubernetesPods: map[podID]urlAndAddress{}} prom.PodLabelInclude = tc.include prom.PodLabelExclude = tc.exclude require.NoError(t, prom.initFilters()) registerPod(p, prom) for _, pd := range prom.kubernetesPods { for _, tagKey := range tc.expectedTags { - require.Contains(t, pd.Tags, tagKey) + require.Contains(t, pd.tags, tagKey) } } }) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 191d27dd29a58..8b557a9cab979 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -34,18 +34,14 @@ import ( //go:embed sample.conf var sampleConfig string -const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3` - -type MonitorMethod string 
- const ( - MonitorMethodNone MonitorMethod = "" - MonitorMethodAnnotations MonitorMethod = "annotations" - MonitorMethodSettings MonitorMethod = "settings" - MonitorMethodSettingsAndAnnotations MonitorMethod = "settings+annotations" -) + acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3` -type PodID string + monitorMethodNone monitorMethod = "" + monitorMethodAnnotations monitorMethod = "annotations" + monitorMethodSettings monitorMethod = "settings" + monitorMethodSettingsAndAnnotations monitorMethod = "settings+annotations" +) type Prometheus struct { URLs []string `toml:"urls"` @@ -72,7 +68,7 @@ type Prometheus struct { KubeConfig string `toml:"kube_config"` KubernetesLabelSelector string `toml:"kubernetes_label_selector"` KubernetesFieldSelector string `toml:"kubernetes_field_selector"` - MonitorKubernetesPodsMethod MonitorMethod `toml:"monitor_kubernetes_pods_method"` + MonitorKubernetesPodsMethod monitorMethod `toml:"monitor_kubernetes_pods_method"` MonitorKubernetesPodsScheme string `toml:"monitor_kubernetes_pods_scheme"` MonitorKubernetesPodsPath string `toml:"monitor_kubernetes_pods_path"` MonitorKubernetesPodsPort int `toml:"monitor_kubernetes_pods_port"` @@ -85,7 +81,7 @@ type Prometheus struct { CacheRefreshInterval int `toml:"cache_refresh_interval"` // Consul discovery - ConsulConfig ConsulConfig `toml:"consul"` + ConsulConfig consulConfig `toml:"consul"` Log telegraf.Logger `toml:"-"` common_http.HTTPClientConfig @@ -100,7 +96,7 @@ type Prometheus struct { // Should we scrape Kubernetes services for prometheus annotations lock sync.Mutex - kubernetesPods map[PodID]URLAndAddress + kubernetesPods map[podID]urlAndAddress cancel context.CancelFunc wg sync.WaitGroup @@ -114,9 +110,21 @@ type Prometheus struct { podLabelExcludeFilter filter.Filter // List of consul services to scrape - consulServices map[string]URLAndAddress + consulServices 
map[string]urlAndAddress } +type urlAndAddress struct { + originalURL *url.URL + url *url.URL + address string + tags map[string]string + namespace string +} + +type monitorMethod string + +type podID string + func (*Prometheus) SampleConfig() string { return sampleConfig } @@ -164,8 +172,8 @@ func (p *Prometheus) Init() error { p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.") } - if p.MonitorKubernetesPodsMethod == MonitorMethodNone { - p.MonitorKubernetesPodsMethod = MonitorMethodAnnotations + if p.MonitorKubernetesPodsMethod == monitorMethodNone { + p.MonitorKubernetesPodsMethod = monitorMethodAnnotations } // Parse label and field selectors - will be used to filter pods after cAdvisor call @@ -239,11 +247,65 @@ func (p *Prometheus) Init() error { "Accept": acceptHeader, } - p.kubernetesPods = make(map[PodID]URLAndAddress) + p.kubernetesPods = make(map[podID]urlAndAddress) return nil } +// Start will start the Kubernetes and/or Consul scraping if enabled in the configuration +func (p *Prometheus) Start(_ telegraf.Accumulator) error { + var ctx context.Context + p.wg = sync.WaitGroup{} + ctx, p.cancel = context.WithCancel(context.Background()) + + if p.ConsulConfig.Enabled && len(p.ConsulConfig.Queries) > 0 { + if err := p.startConsul(ctx); err != nil { + return err + } + } + if p.MonitorPods { + if err := p.startK8s(ctx); err != nil { + return err + } + } + return nil +} + +func (p *Prometheus) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + allURLs, err := p.getAllURLs() + if err != nil { + return err + } + for _, URL := range allURLs { + wg.Add(1) + go func(serviceURL urlAndAddress) { + defer wg.Done() + requestFields, tags, err := p.gatherURL(serviceURL, acc) + acc.AddError(err) + + // Add metrics + if p.EnableRequestMetrics { + acc.AddFields("prometheus_request", requestFields, tags) + } + }(URL) + } + + wg.Wait() + + return nil +} + +func (p *Prometheus) Stop() { + p.cancel() + p.wg.Wait() + + if 
p.client != nil { + p.client.CloseIdleConnections() + } +} + func (p *Prometheus) initFilters() error { if p.PodAnnotationExclude != nil { podAnnotationExclude, err := filter.Compile(p.PodAnnotationExclude) @@ -276,7 +338,7 @@ func (p *Prometheus) initFilters() error { return nil } -func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL { +func (p *Prometheus) addressToURL(u *url.URL, address string) *url.URL { host := address if u.Port() != "" { host = address + ":" + u.Port() @@ -295,23 +357,15 @@ func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL { return reconstructedURL } -type URLAndAddress struct { - OriginalURL *url.URL - URL *url.URL - Address string - Tags map[string]string - Namespace string -} - -func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { - allURLs := make(map[string]URLAndAddress, len(p.URLs)+len(p.consulServices)+len(p.kubernetesPods)) +func (p *Prometheus) getAllURLs() (map[string]urlAndAddress, error) { + allURLs := make(map[string]urlAndAddress, len(p.URLs)+len(p.consulServices)+len(p.kubernetesPods)) for _, u := range p.URLs { address, err := url.Parse(u) if err != nil { p.Log.Errorf("Could not parse %q, skipping it. 
Error: %s", u, err.Error()) continue } - allURLs[address.String()] = URLAndAddress{URL: address, OriginalURL: address} + allURLs[address.String()] = urlAndAddress{url: address, originalURL: address} } p.lock.Lock() @@ -322,8 +376,8 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { } // loop through all pods scraped via the prometheus annotation on the pods for _, v := range p.kubernetesPods { - if namespaceAnnotationMatch(v.Namespace, p) { - allURLs[v.URL.String()] = v + if namespaceAnnotationMatch(v.namespace, p) { + allURLs[v.url.String()] = v } } @@ -339,62 +393,34 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { continue } for _, resolved := range resolvedAddresses { - serviceURL := p.AddressToURL(address, resolved) - allURLs[serviceURL.String()] = URLAndAddress{ - URL: serviceURL, - Address: resolved, - OriginalURL: address, + serviceURL := p.addressToURL(address, resolved) + allURLs[serviceURL.String()] = urlAndAddress{ + url: serviceURL, + address: resolved, + originalURL: address, } } } return allURLs, nil } -// Reads stats from all configured servers accumulates stats. -// Returns one of the errors encountered while gather stats (if any). 
-func (p *Prometheus) Gather(acc telegraf.Accumulator) error { - var wg sync.WaitGroup - - allURLs, err := p.GetAllURLs() - if err != nil { - return err - } - for _, URL := range allURLs { - wg.Add(1) - go func(serviceURL URLAndAddress) { - defer wg.Done() - requestFields, tags, err := p.gatherURL(serviceURL, acc) - acc.AddError(err) - - // Add metrics - if p.EnableRequestMetrics { - acc.AddFields("prometheus_request", requestFields, tags) - } - }(URL) - } - - wg.Wait() - - return nil -} - -func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[string]interface{}, map[string]string, error) { +func (p *Prometheus) gatherURL(u urlAndAddress, acc telegraf.Accumulator) (map[string]interface{}, map[string]string, error) { var req *http.Request var uClient *http.Client requestFields := make(map[string]interface{}) - tags := make(map[string]string, len(u.Tags)+2) + tags := make(map[string]string, len(u.tags)+2) if p.URLTag != "" { - tags[p.URLTag] = u.OriginalURL.String() + tags[p.URLTag] = u.originalURL.String() } - if u.Address != "" { - tags["address"] = u.Address + if u.address != "" { + tags["address"] = u.address } - for k, v := range u.Tags { + for k, v := range u.tags { tags[k] = v } - if u.URL.Scheme == "unix" { - path := u.URL.Query().Get("path") + if u.url.Scheme == "unix" { + path := u.url.Query().Get("path") if path == "" { path = "/metrics" } @@ -413,19 +439,19 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s TLSClientConfig: tlsCfg, DisableKeepAlives: true, Dial: func(string, string) (net.Conn, error) { - c, err := net.Dial("unix", u.URL.Path) + c, err := net.Dial("unix", u.url.Path) return c, err }, }, } } else { - if u.URL.Path == "" { - u.URL.Path = "/metrics" + if u.url.Path == "" { + u.url.Path = "/metrics" } var err error - req, err = http.NewRequest("GET", u.URL.String(), nil) + req, err = http.NewRequest("GET", u.url.String(), nil) if err != nil { - return nil, nil, fmt.Errorf("unable to 
create new request %q: %w", u.URL.String(), err) + return nil, nil, fmt.Errorf("unable to create new request %q: %w", u.url.String(), err) } } @@ -469,7 +495,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s var err error var resp *http.Response var start time.Time - if u.URL.Scheme != "unix" { + if u.url.Scheme != "unix" { start = time.Now() //nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer` resp, err = p.client.Do(req) @@ -480,14 +506,14 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s } end := time.Since(start).Seconds() if err != nil { - return requestFields, tags, fmt.Errorf("error making HTTP request to %q: %w", u.URL, err) + return requestFields, tags, fmt.Errorf("error making HTTP request to %q: %w", u.url, err) } requestFields["response_time"] = end defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return requestFields, tags, fmt.Errorf("%q returned HTTP status %q", u.URL, resp.Status) + return requestFields, tags, fmt.Errorf("%q returned HTTP status %q", u.url, resp.Status) } var body []byte @@ -504,7 +530,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s return requestFields, tags, fmt.Errorf("error reading body: %w", err) } if int64(len(body)) > limit { - p.Log.Infof("skipping %s: content length exceeded maximum body size (%d)", u.URL, limit) + p.Log.Infof("skipping %s: content length exceeded maximum body size (%d)", u.url, limit) return requestFields, tags, nil } } else { @@ -539,20 +565,20 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s } metrics, err := metricParser.Parse(body) if err != nil { - return requestFields, tags, fmt.Errorf("error reading metrics for %q: %w", u.URL, err) + return requestFields, tags, fmt.Errorf("error reading metrics for %q: %w", u.url, err) } for _, metric := range metrics { tags := metric.Tags() // strip user and password from 
URL - u.OriginalURL.User = nil + u.originalURL.User = nil if p.URLTag != "" { - tags[p.URLTag] = u.OriginalURL.String() + tags[p.URLTag] = u.originalURL.String() } - if u.Address != "" { - tags["address"] = u.Address + if u.address != "" { + tags["address"] = u.address } - for k, v := range u.Tags { + for k, v := range u.tags { tags[k] = v } @@ -603,39 +629,11 @@ func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) { return true, "" } -// Start will start the Kubernetes and/or Consul scraping if enabled in the configuration -func (p *Prometheus) Start(_ telegraf.Accumulator) error { - var ctx context.Context - p.wg = sync.WaitGroup{} - ctx, p.cancel = context.WithCancel(context.Background()) - - if p.ConsulConfig.Enabled && len(p.ConsulConfig.Queries) > 0 { - if err := p.startConsul(ctx); err != nil { - return err - } - } - if p.MonitorPods { - if err := p.startK8s(ctx); err != nil { - return err - } - } - return nil -} - -func (p *Prometheus) Stop() { - p.cancel() - p.wg.Wait() - - if p.client != nil { - p.client.CloseIdleConnections() - } -} - func init() { inputs.Add("prometheus", func() telegraf.Input { return &Prometheus{ - kubernetesPods: make(map[PodID]URLAndAddress), - consulServices: make(map[string]URLAndAddress), + kubernetesPods: make(map[podID]urlAndAddress), + consulServices: make(map[string]urlAndAddress), URLTag: "url", } }) diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 995ec9c8dcb8c..cdb723da3d357 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -630,7 +630,7 @@ func TestInitConfigSelectors(t *testing.T) { URLs: nil, URLTag: "url", MonitorPods: true, - MonitorKubernetesPodsMethod: MonitorMethodSettings, + MonitorKubernetesPodsMethod: monitorMethodSettings, PodScrapeInterval: 60, KubernetesLabelSelector: "app=test", KubernetesFieldSelector: "spec.nodeName=node-0", diff --git 
a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 22729e5ce76c2..7d4cebf2b9a95 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -23,18 +23,6 @@ func (*Proxmox) SampleConfig() string { return sampleConfig } -func (px *Proxmox) Gather(acc telegraf.Accumulator) error { - err := getNodeSearchDomain(px) - if err != nil { - return err - } - - gatherLxcData(px, acc) - gatherQemuData(px, acc) - - return nil -} - func (px *Proxmox) Init() error { // Set hostname as default node name for backwards compatibility if px.NodeName == "" { @@ -57,12 +45,16 @@ func (px *Proxmox) Init() error { return nil } -func init() { - inputs.Add("proxmox", func() telegraf.Input { - return &Proxmox{ - requestFunction: performRequest, - } - }) +func (px *Proxmox) Gather(acc telegraf.Accumulator) error { + err := getNodeSearchDomain(px) + if err != nil { + return err + } + + gatherLxcData(px, acc) + gatherQemuData(px, acc) + + return nil } func getNodeSearchDomain(px *Proxmox) error { @@ -274,3 +266,11 @@ func getTags(px *Proxmox, name string, vmConfig vmConfig, rt resourceType) map[s "vm_type": string(rt), } } + +func init() { + inputs.Add("proxmox", func() telegraf.Input { + return &Proxmox{ + requestFunction: performRequest, + } + }) +} diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index 941af52fb8a2b..47b6856f61e86 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -10,28 +10,28 @@ import ( "github.com/influxdata/telegraf/plugins/common/tls" ) +var ( + qemu resourceType = "qemu" + lxc resourceType = "lxc" +) + type Proxmox struct { BaseURL string `toml:"base_url"` APIToken string `toml:"api_token"` ResponseTimeout config.Duration `toml:"response_timeout"` NodeName string `toml:"node_name"` - tls.ClientConfig - httpClient *http.Client - nodeSearchDomain string + Log telegraf.Logger `toml:"-"` - requestFunction func(px *Proxmox, apiUrl string, method 
string, data url.Values) ([]byte, error) - Log telegraf.Logger `toml:"-"` + httpClient *http.Client + + nodeSearchDomain string + requestFunction func(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) } type resourceType string -var ( - qemu resourceType = "qemu" - lxc resourceType = "lxc" -) - type vmStats struct { Data []vmStat `json:"data"` } diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index f4332858d9d29..d7cc5d882d877 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -17,12 +17,11 @@ import ( //go:embed sample.conf var sampleConfig string -// PuppetAgent is a PuppetAgent plugin type PuppetAgent struct { - Location string + Location string `toml:"location"` } -type State struct { +type state struct { Events event Resources resource Changes change @@ -101,7 +100,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return err } - var puppetState State + var puppetState state err = yaml.Unmarshal(fh, &puppetState) if err != nil { @@ -114,7 +113,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return nil } -func structPrinter(s *State, acc telegraf.Accumulator, tags map[string]string) { +func structPrinter(s *state, acc telegraf.Accumulator, tags map[string]string) { e := reflect.ValueOf(s).Elem() fields := make(map[string]interface{}) From 53af37495d4ce3950a3ccba78bf0d40a8bc0b90d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 20:58:11 +0100 Subject: [PATCH 167/170] chore(deps): Bump github.com/Azure/go-autorest/autorest/adal from 0.9.23 to 0.9.24 (#16315) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c67bdf8cfb7c5..0a489cf4eb693 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20230531184854-c06a8eff66fe github.com/Azure/go-autorest/autorest v0.11.29 - github.com/Azure/go-autorest/autorest/adal v0.9.23 + github.com/Azure/go-autorest/autorest/adal v0.9.24 github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 github.com/BurntSushi/toml v1.4.0 github.com/ClickHouse/clickhouse-go v1.5.4 diff --git a/go.sum b/go.sum index d68cf935ee6b3..0e4d82486c5f3 100644 --- a/go.sum +++ b/go.sum @@ -689,8 +689,8 @@ github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/ github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= From 1739deac37e1d1f6d6a0ce88977c30fd28816914 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 18 Dec 2024 15:48:03 +0100 Subject: [PATCH 168/170] chore(deps): Bump github.com/vapourismo/knx-go from v0.0.0-20240217175130-922a0d50c241 to 
v0.0.0-20240915133544-a6ab43471c11 (#16324) --- go.mod | 2 +- go.sum | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0a489cf4eb693..5570ab00f3c5b 100644 --- a/go.mod +++ b/go.mod @@ -196,7 +196,7 @@ require ( github.com/tidwall/wal v1.1.7 github.com/tinylib/msgp v1.2.0 github.com/urfave/cli/v2 v2.27.2 - github.com/vapourismo/knx-go v0.0.0-20240217175130-922a0d50c241 + github.com/vapourismo/knx-go v0.0.0-20240915133544-a6ab43471c11 github.com/vishvananda/netlink v1.3.0 github.com/vishvananda/netns v0.0.5 github.com/vjeantet/grok v1.0.1 diff --git a/go.sum b/go.sum index 0e4d82486c5f3..8f152edf1bd6c 100644 --- a/go.sum +++ b/go.sum @@ -2379,8 +2379,8 @@ github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI= github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/vapourismo/knx-go v0.0.0-20240217175130-922a0d50c241 h1:3r4OPQ/jPYQA0C7i149kevHLGSG4JZtrQv2986fXSCo= -github.com/vapourismo/knx-go v0.0.0-20240217175130-922a0d50c241/go.mod h1:aGkV5xHz9sBkAckp2hez7khfehKp4YvyBwAmVdVEulg= +github.com/vapourismo/knx-go v0.0.0-20240915133544-a6ab43471c11 h1:YzrpNqpAuAgUQ0vseiI3mAVz7zr0rM5LWdaGCCr6Ipc= +github.com/vapourismo/knx-go v0.0.0-20240915133544-a6ab43471c11/go.mod h1:+iC7aAxEwuJ4mvdKaY0zCGT0dpIC/AtHt4yv2jr5FOo= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= @@ -2719,6 +2719,7 @@ golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net 
v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= From c3fb79e9931f38fc78fa8ca0bba2440aa7b9bf31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 18 Dec 2024 19:47:47 +0100 Subject: [PATCH 169/170] chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[l-r]` (#16325) --- plugins/inputs/lanz/lanz.go | 2 +- plugins/inputs/leofs/leofs.go | 6 +-- plugins/inputs/libvirt/libvirt.go | 2 +- .../inputs/libvirt/libvirt_metric_format.go | 40 +++++++++---------- plugins/inputs/linux_cpu/linux_cpu.go | 2 +- plugins/inputs/logstash/logstash.go | 25 +++++------- plugins/inputs/lvm/lvm.go | 4 -- plugins/inputs/mcrouter/mcrouter.go | 8 ++-- plugins/inputs/mcrouter/mcrouter_test.go | 8 +--- plugins/inputs/modbus/configuration_metric.go | 6 +-- .../inputs/modbus/configuration_register.go | 18 ++++----- .../inputs/modbus/configuration_request.go | 10 ++--- plugins/inputs/modbus/modbus.go | 10 ++--- plugins/inputs/monit/monit_test.go | 4 +- .../mqtt_consumer/mqtt_consumer_test.go | 22 +++++----- plugins/inputs/mysql/mysql.go | 30 +++++++------- plugins/inputs/nats_consumer/nats_consumer.go | 2 +- plugins/inputs/neptune_apex/neptune_apex.go | 4 +- .../inputs/neptune_apex/neptune_apex_test.go | 6 +-- plugins/inputs/netflow/netflow.go | 2 +- plugins/inputs/netflow/netflow_v5.go | 4 +- .../nginx_upstream_check.go | 4 +- plugins/inputs/nsq_consumer/nsq_consumer.go | 2 +- plugins/inputs/nvidia_smi/nvidia_smi.go | 2 +- plugins/inputs/opentelemetry/writer.go | 2 +- plugins/inputs/pf/pf.go | 4 +- 
plugins/inputs/phpfpm/fcgi_test.go | 6 +-- plugins/inputs/phpfpm/phpfpm_test.go | 2 +- .../inputs/powerdns/powerdns_linux_test.go | 7 +--- .../powerdns_recursor/powerdns_recursor.go | 2 +- .../inputs/powerdns_recursor/protocol_v3.go | 2 +- plugins/inputs/procstat/native_finder.go | 16 ++++---- plugins/inputs/procstat/pgrep.go | 2 +- plugins/inputs/procstat/procstat.go | 4 +- plugins/inputs/procstat/procstat_test.go | 4 +- plugins/inputs/prometheus/kubernetes.go | 2 +- plugins/inputs/prometheus/prometheus.go | 4 +- plugins/inputs/radius/radius.go | 2 +- plugins/inputs/raindrops/raindrops.go | 4 +- plugins/inputs/raindrops/raindrops_test.go | 3 +- plugins/inputs/redis/redis.go | 6 +-- plugins/inputs/redis/redis_test.go | 8 ++-- plugins/inputs/rethinkdb/rethinkdb.go | 6 +-- .../riemann_listener/riemann_listener.go | 2 +- 44 files changed, 145 insertions(+), 166 deletions(-) diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index bb2040d317cc9..a9589bb701777 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -58,7 +58,7 @@ func (l *Lanz) Start(acc telegraf.Accumulator) error { return nil } -func (l *Lanz) Gather(_ telegraf.Accumulator) error { +func (*Lanz) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index 3845384a19179..19e0878b2f608 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -159,7 +159,7 @@ func (*LeoFS) SampleConfig() string { func (l *LeoFS) Gather(acc telegraf.Accumulator) error { if len(l.Servers) == 0 { - return l.gatherServer(defaultEndpoint, serverTypeManagerMaster, acc) + return gatherServer(defaultEndpoint, serverTypeManagerMaster, acc) } var wg sync.WaitGroup for _, endpoint := range l.Servers { @@ -185,14 +185,14 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(endpoint string, st serverType) { defer wg.Done() - acc.AddError(l.gatherServer(endpoint, st, acc)) + 
acc.AddError(gatherServer(endpoint, st, acc)) }(endpoint, st) } wg.Wait() return nil } -func (l *LeoFS) gatherServer(endpoint string, serverType serverType, acc telegraf.Accumulator) error { +func gatherServer(endpoint string, serverType serverType, acc telegraf.Accumulator) error { cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", "-On", endpoint, oid) stdout, err := cmd.StdoutPipe() if err != nil { diff --git a/plugins/inputs/libvirt/libvirt.go b/plugins/inputs/libvirt/libvirt.go index 4a32eaf761773..a1fe4363f205b 100644 --- a/plugins/inputs/libvirt/libvirt.go +++ b/plugins/inputs/libvirt/libvirt.go @@ -47,7 +47,7 @@ type Libvirt struct { domainsMap map[string]struct{} } -func (l *Libvirt) SampleConfig() string { +func (*Libvirt) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/libvirt/libvirt_metric_format.go b/plugins/inputs/libvirt/libvirt_metric_format.go index 91946ed2cc66e..c87ebe1c880dd 100644 --- a/plugins/inputs/libvirt/libvirt_metric_format.go +++ b/plugins/inputs/libvirt/libvirt_metric_format.go @@ -17,31 +17,31 @@ var ( ) func (l *Libvirt) addMetrics(stats []golibvirt.DomainStatsRecord, vcpuInfos map[string][]vcpuAffinity, acc telegraf.Accumulator) { - domainsMetrics := l.translateMetrics(stats) + domainsMetrics := translateMetrics(stats) for domainName, metrics := range domainsMetrics { for metricType, values := range metrics { switch metricType { case "state": - l.addStateMetrics(values, domainName, acc) + addStateMetrics(values, domainName, acc) case "cpu": - l.addCPUMetrics(values, domainName, acc) + addCPUMetrics(values, domainName, acc) case "balloon": - l.addBalloonMetrics(values, domainName, acc) + addBalloonMetrics(values, domainName, acc) case "vcpu": l.addVcpuMetrics(values, domainName, vcpuInfos[domainName], acc) case "net": - l.addInterfaceMetrics(values, domainName, acc) + addInterfaceMetrics(values, domainName, acc) case "perf": - l.addPerfMetrics(values, domainName, acc) + addPerfMetrics(values, domainName, 
acc) case "block": - l.addBlockMetrics(values, domainName, acc) + addBlockMetrics(values, domainName, acc) case "iothread": - l.addIothreadMetrics(values, domainName, acc) + addIothreadMetrics(values, domainName, acc) case "memory": - l.addMemoryMetrics(values, domainName, acc) + addMemoryMetrics(values, domainName, acc) case "dirtyrate": - l.addDirtyrateMetrics(values, domainName, acc) + addDirtyrateMetrics(values, domainName, acc) } } } @@ -61,7 +61,7 @@ func (l *Libvirt) addMetrics(stats []golibvirt.DomainStatsRecord, vcpuInfos map[ } } -func (l *Libvirt) translateMetrics(stats []golibvirt.DomainStatsRecord) map[string]map[string]map[string]golibvirt.TypedParamValue { +func translateMetrics(stats []golibvirt.DomainStatsRecord) map[string]map[string]map[string]golibvirt.TypedParamValue { metrics := make(map[string]map[string]map[string]golibvirt.TypedParamValue) for _, stat := range stats { if stat.Params != nil { @@ -83,7 +83,7 @@ func (l *Libvirt) translateMetrics(stats []golibvirt.DomainStatsRecord) map[stri return metrics } -func (l *Libvirt) addStateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addStateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var stateFields = make(map[string]interface{}) var stateTags = map[string]string{ "domain_name": domainName, @@ -101,7 +101,7 @@ func (l *Libvirt) addStateMetrics(metrics map[string]golibvirt.TypedParamValue, } } -func (l *Libvirt) addCPUMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addCPUMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var cpuFields = make(map[string]interface{}) var cpuCacheMonitorTotalFields = make(map[string]interface{}) @@ -188,7 +188,7 @@ func (l *Libvirt) addCPUMetrics(metrics map[string]golibvirt.TypedParamValue, do } } -func (l *Libvirt) addBalloonMetrics(metrics 
map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addBalloonMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var balloonFields = make(map[string]interface{}) var balloonTags = map[string]string{ "domain_name": domainName, @@ -283,7 +283,7 @@ func (l *Libvirt) getCurrentPCPUForVCPU(vcpuID string, vcpuInfos []vcpuAffinity) return -1 } -func (l *Libvirt) addInterfaceMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addInterfaceMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var netTotalFields = make(map[string]interface{}) var netData = make(map[string]map[string]interface{}) @@ -330,7 +330,7 @@ func (l *Libvirt) addInterfaceMetrics(metrics map[string]golibvirt.TypedParamVal } } -func (l *Libvirt) addPerfMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addPerfMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var perfFields = make(map[string]interface{}) var perfTags = map[string]string{ "domain_name": domainName, @@ -351,7 +351,7 @@ func (l *Libvirt) addPerfMetrics(metrics map[string]golibvirt.TypedParamValue, d } } -func (l *Libvirt) addBlockMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addBlockMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var blockTotalFields = make(map[string]interface{}) var blockData = make(map[string]map[string]interface{}) @@ -399,7 +399,7 @@ func (l *Libvirt) addBlockMetrics(metrics map[string]golibvirt.TypedParamValue, } } -func (l *Libvirt) addIothreadMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addIothreadMetrics(metrics map[string]golibvirt.TypedParamValue, 
domainName string, acc telegraf.Accumulator) { var iothreadTotalFields = make(map[string]interface{}) var iothreadData = make(map[string]map[string]interface{}) @@ -446,7 +446,7 @@ func (l *Libvirt) addIothreadMetrics(metrics map[string]golibvirt.TypedParamValu } } -func (l *Libvirt) addMemoryMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addMemoryMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var memoryBandwidthMonitorTotalFields = make(map[string]interface{}) var memoryBandwidthMonitorData = make(map[string]map[string]interface{}) @@ -528,7 +528,7 @@ func (l *Libvirt) addMemoryMetrics(metrics map[string]golibvirt.TypedParamValue, } } -func (l *Libvirt) addDirtyrateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { +func addDirtyrateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { var dirtyrateFields = make(map[string]interface{}) var dirtyrateVcpuData = make(map[string]map[string]interface{}) diff --git a/plugins/inputs/linux_cpu/linux_cpu.go b/plugins/inputs/linux_cpu/linux_cpu.go index e7839d4e172c7..1457184c9da06 100644 --- a/plugins/inputs/linux_cpu/linux_cpu.go +++ b/plugins/inputs/linux_cpu/linux_cpu.go @@ -47,7 +47,7 @@ type prop struct { optional bool } -func (g *LinuxCPU) SampleConfig() string { +func (*LinuxCPU) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index da65773c46f39..4fe48035e7a5c 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -283,12 +283,7 @@ func (logstash *Logstash) gatherProcessStats(address string, accumulator telegra } // gatherPluginsStats go through a list of plugins and add their metrics to the accumulator -func (logstash *Logstash) gatherPluginsStats( - plugins []plugin, - pluginType 
string, - tags map[string]string, - accumulator telegraf.Accumulator, -) error { +func gatherPluginsStats(plugins []plugin, pluginType string, tags map[string]string, accumulator telegraf.Accumulator) error { for _, plugin := range plugins { pluginTags := map[string]string{ "plugin_name": plugin.Name, @@ -370,7 +365,7 @@ func (logstash *Logstash) gatherPluginsStats( return nil } -func (logstash *Logstash) gatherQueueStats(queue pipelineQueue, tags map[string]string, acc telegraf.Accumulator) error { +func gatherQueueStats(queue pipelineQueue, tags map[string]string, acc telegraf.Accumulator) error { queueTags := map[string]string{ "queue_type": queue.Type, } @@ -438,20 +433,20 @@ func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegr } accumulator.AddFields("logstash_events", flattener.Fields, tags) - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator) + err = gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator) if err != nil { return err } - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator) + err = gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator) if err != nil { return err } - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator) + err = gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator) if err != nil { return err } - err = logstash.gatherQueueStats(pipelineStats.Pipeline.Queue, tags, accumulator) + err = gatherQueueStats(pipelineStats.Pipeline.Queue, tags, accumulator) if err != nil { return err } @@ -484,20 +479,20 @@ func (logstash *Logstash) gatherPipelinesStats(address string, accumulator teleg } accumulator.AddFields("logstash_events", flattener.Fields, tags) - err = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator) + err = 
gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator) if err != nil { return err } - err = logstash.gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator) + err = gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator) if err != nil { return err } - err = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator) + err = gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator) if err != nil { return err } - err = logstash.gatherQueueStats(pipeline.Queue, tags, accumulator) + err = gatherQueueStats(pipeline.Queue, tags, accumulator) if err != nil { return err } diff --git a/plugins/inputs/lvm/lvm.go b/plugins/inputs/lvm/lvm.go index 0efb7270b9d5d..e1c246dddf7f9 100644 --- a/plugins/inputs/lvm/lvm.go +++ b/plugins/inputs/lvm/lvm.go @@ -33,10 +33,6 @@ func (*LVM) SampleConfig() string { return sampleConfig } -func (lvm *LVM) Init() error { - return nil -} - func (lvm *LVM) Gather(acc telegraf.Accumulator) error { if err := lvm.gatherPhysicalVolumes(acc); err != nil { return err diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index 37202fa300db0..0cf91573a88f8 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -128,14 +128,14 @@ func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { } for _, serverAddress := range m.Servers { - acc.AddError(m.gatherServer(ctx, serverAddress, acc)) + acc.AddError(gatherServer(ctx, serverAddress, acc)) } return nil } // parseAddress parses an address string into 'host:port' and 'protocol' parts -func (m *Mcrouter) parseAddress(address string) (parsedAddress, protocol string, err error) { +func parseAddress(address string) (parsedAddress, protocol string, err error) { var host string var port string @@ -181,13 +181,13 @@ func (m *Mcrouter) parseAddress(address string) (parsedAddress, protocol string, return parsedAddress, protocol, nil } -func (m *Mcrouter) 
gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { +func gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { var conn net.Conn var err error var protocol string var dialer net.Dialer - address, protocol, err = m.parseAddress(address) + address, protocol, err = parseAddress(address) if err != nil { return err } diff --git a/plugins/inputs/mcrouter/mcrouter_test.go b/plugins/inputs/mcrouter/mcrouter_test.go index 47f658d256afa..a0d1414ff7d0b 100644 --- a/plugins/inputs/mcrouter/mcrouter_test.go +++ b/plugins/inputs/mcrouter/mcrouter_test.go @@ -15,10 +15,6 @@ import ( ) func TestAddressParsing(t *testing.T) { - m := &Mcrouter{ - Servers: []string{"tcp://" + testutil.GetLocalHost()}, - } - var acceptTests = [][3]string{ {"tcp://localhost:8086", "localhost:8086", "tcp"}, {"tcp://localhost", "localhost:" + defaultServerURL.Port(), "tcp"}, @@ -32,7 +28,7 @@ func TestAddressParsing(t *testing.T) { } for _, args := range acceptTests { - address, protocol, err := m.parseAddress(args[0]) + address, protocol, err := parseAddress(args[0]) require.NoError(t, err, args[0]) require.Equal(t, args[1], address, args[0]) @@ -40,7 +36,7 @@ func TestAddressParsing(t *testing.T) { } for _, addr := range rejectTests { - address, protocol, err := m.parseAddress(addr) + address, protocol, err := parseAddress(addr) require.Error(t, err, addr) require.Empty(t, address, addr) diff --git a/plugins/inputs/modbus/configuration_metric.go b/plugins/inputs/modbus/configuration_metric.go index c0301728e0e39..959690a1eb89f 100644 --- a/plugins/inputs/modbus/configuration_metric.go +++ b/plugins/inputs/modbus/configuration_metric.go @@ -42,7 +42,7 @@ type configurationPerMetric struct { logger telegraf.Logger } -func (c *configurationPerMetric) sampleConfigPart() string { +func (*configurationPerMetric) sampleConfigPart() string { return sampleConfigPartPerMetric } @@ -366,7 +366,7 @@ func (c *configurationPerMetric) fieldID(seed 
maphash.Seed, def metricDefinition return mh.Sum64() } -func (c *configurationPerMetric) determineOutputDatatype(input string) (string, error) { +func (*configurationPerMetric) determineOutputDatatype(input string) (string, error) { // Handle our special types switch input { case "INT8L", "INT8H", "INT16", "INT32", "INT64": @@ -381,7 +381,7 @@ func (c *configurationPerMetric) determineOutputDatatype(input string) (string, return "unknown", fmt.Errorf("invalid input datatype %q for determining output", input) } -func (c *configurationPerMetric) determineFieldLength(input string, length uint16) (uint16, error) { +func (*configurationPerMetric) determineFieldLength(input string, length uint16) (uint16, error) { // Handle our special types switch input { case "BIT", "INT8L", "INT8H", "UINT8L", "UINT8H": diff --git a/plugins/inputs/modbus/configuration_register.go b/plugins/inputs/modbus/configuration_register.go index 9bd70caca6caa..9d47af5553298 100644 --- a/plugins/inputs/modbus/configuration_register.go +++ b/plugins/inputs/modbus/configuration_register.go @@ -31,7 +31,7 @@ type configurationOriginal struct { logger telegraf.Logger } -func (c *configurationOriginal) sampleConfigPart() string { +func (*configurationOriginal) sampleConfigPart() string { return sampleConfigPartPerRegister } @@ -43,19 +43,19 @@ func (c *configurationOriginal) check() error { return fmt.Errorf("invalid 'string_register_location' %q", c.workarounds.StringRegisterLocation) } - if err := c.validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { + if err := validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { return err } - if err := c.validateFieldDefinitions(c.Coils, cCoils); err != nil { + if err := validateFieldDefinitions(c.Coils, cCoils); err != nil { return err } - if err := c.validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { + if err := validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { 
return err } - return c.validateFieldDefinitions(c.InputRegisters, cInputRegisters) + return validateFieldDefinitions(c.InputRegisters, cInputRegisters) } func (c *configurationOriginal) process() (map[byte]requestSet, error) { @@ -182,7 +182,7 @@ func (c *configurationOriginal) newFieldFromDefinition(def fieldDefinition, type return f, nil } -func (c *configurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefinition, registerType string) error { +func validateFieldDefinitions(fieldDefs []fieldDefinition, registerType string) error { nameEncountered := make(map[string]bool, len(fieldDefs)) for _, item := range fieldDefs { // check empty name @@ -276,7 +276,7 @@ func (c *configurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini return nil } -func (c *configurationOriginal) normalizeInputDatatype(dataType string, words int) (string, error) { +func (*configurationOriginal) normalizeInputDatatype(dataType string, words int) (string, error) { if dataType == "FLOAT32" { config.PrintOptionValueDeprecationNotice("input.modbus", "data_type", "FLOAT32", telegraf.DeprecationInfo{ Since: "1.16.0", @@ -323,7 +323,7 @@ func (c *configurationOriginal) normalizeInputDatatype(dataType string, words in return normalizeInputDatatype(dataType) } -func (c *configurationOriginal) normalizeOutputDatatype(dataType string) (string, error) { +func (*configurationOriginal) normalizeOutputDatatype(dataType string) (string, error) { // Handle our special types switch dataType { case "FIXED", "FLOAT32", "UFIXED": @@ -332,7 +332,7 @@ func (c *configurationOriginal) normalizeOutputDatatype(dataType string) (string return normalizeOutputDatatype("native") } -func (c *configurationOriginal) normalizeByteOrder(byteOrder string) (string, error) { +func (*configurationOriginal) normalizeByteOrder(byteOrder string) (string, error) { // Handle our special types switch byteOrder { case "AB", "ABCDEFGH": diff --git a/plugins/inputs/modbus/configuration_request.go 
b/plugins/inputs/modbus/configuration_request.go index 6288b0c1b5f99..13cfc36c3d710 100644 --- a/plugins/inputs/modbus/configuration_request.go +++ b/plugins/inputs/modbus/configuration_request.go @@ -45,7 +45,7 @@ type configurationPerRequest struct { logger telegraf.Logger } -func (c *configurationPerRequest) sampleConfigPart() string { +func (*configurationPerRequest) sampleConfigPart() string { return sampleConfigPartPerRequest } @@ -300,7 +300,7 @@ func (c *configurationPerRequest) newFieldFromDefinition(def requestFieldDefinit fieldLength := uint16(1) if typed { - if fieldLength, err = c.determineFieldLength(def.InputType, def.Length); err != nil { + if fieldLength, err = determineFieldLength(def.InputType, def.Length); err != nil { return field{}, err } } @@ -338,7 +338,7 @@ func (c *configurationPerRequest) newFieldFromDefinition(def requestFieldDefinit // For non-scaling cases we should choose the output corresponding to the input class // i.e. INT64 for INT*, UINT64 for UINT* etc. 
var err error - if def.OutputType, err = c.determineOutputDatatype(def.InputType); err != nil { + if def.OutputType, err = determineOutputDatatype(def.InputType); err != nil { return field{}, err } } else { @@ -406,7 +406,7 @@ func (c *configurationPerRequest) fieldID(seed maphash.Seed, def requestDefiniti return mh.Sum64() } -func (c *configurationPerRequest) determineOutputDatatype(input string) (string, error) { +func determineOutputDatatype(input string) (string, error) { // Handle our special types switch input { case "INT8L", "INT8H", "INT16", "INT32", "INT64": @@ -421,7 +421,7 @@ func (c *configurationPerRequest) determineOutputDatatype(input string) (string, return "unknown", fmt.Errorf("invalid input datatype %q for determining output", input) } -func (c *configurationPerRequest) determineFieldLength(input string, length uint16) (uint16, error) { +func determineFieldLength(input string, length uint16) (uint16, error) { // Handle our special types switch input { case "BIT", "INT8L", "INT8H", "UINT8L", "UINT8H": diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 0d95d3987ced6..eeb6577a8c5ae 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -251,22 +251,22 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { if !m.ExcludeRegisterTypeTag { tags["type"] = cCoils } - m.collectFields(grouper, timestamp, tags, requests.coil) + collectFields(grouper, timestamp, tags, requests.coil) if !m.ExcludeRegisterTypeTag { tags["type"] = cDiscreteInputs } - m.collectFields(grouper, timestamp, tags, requests.discrete) + collectFields(grouper, timestamp, tags, requests.discrete) if !m.ExcludeRegisterTypeTag { tags["type"] = cHoldingRegisters } - m.collectFields(grouper, timestamp, tags, requests.holding) + collectFields(grouper, timestamp, tags, requests.holding) if !m.ExcludeRegisterTypeTag { tags["type"] = cInputRegisters } - m.collectFields(grouper, timestamp, tags, requests.input) + 
collectFields(grouper, timestamp, tags, requests.input) // Add the metrics grouped by series to the accumulator for _, x := range grouper.Metrics() { @@ -532,7 +532,7 @@ func (m *Modbus) gatherRequestsInput(requests []request) error { return nil } -func (m *Modbus) collectFields(grouper *metric.SeriesGrouper, timestamp time.Time, tags map[string]string, requests []request) { +func collectFields(grouper *metric.SeriesGrouper, timestamp time.Time, tags map[string]string, requests []request) { for _, request := range requests { for _, field := range request.fields { // Collect tags from global and per-request diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index cf4d79ce693ba..e83e51643cfd3 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -17,8 +17,8 @@ import ( type transportMock struct { } -func (t *transportMock) RoundTrip(_ *http.Request) (*http.Response, error) { - errorString := "Get http://127.0.0.1:2812/_status?format=xml: " + +func (*transportMock) RoundTrip(*http.Request) (*http.Response, error) { + errorString := "get http://127.0.0.1:2812/_status?format=xml: " + "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + "read: connection reset by peer" return nil, errors.New(errorString) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index a1ec7dd272eb1..32f5b7e9f1da5 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -64,15 +64,15 @@ type fakeParser struct{} // fakeParser satisfies telegraf.Parser var _ telegraf.Parser = &fakeParser{} -func (p *fakeParser) Parse(_ []byte) ([]telegraf.Metric, error) { +func (*fakeParser) Parse([]byte) ([]telegraf.Metric, error) { panic("not implemented") } -func (p *fakeParser) ParseLine(_ string) (telegraf.Metric, error) { +func (*fakeParser) ParseLine(string) (telegraf.Metric, error) { panic("not implemented") 
} -func (p *fakeParser) SetDefaultTags(_ map[string]string) { +func (*fakeParser) SetDefaultTags(map[string]string) { panic("not implemented") } @@ -84,15 +84,15 @@ type fakeToken struct { // fakeToken satisfies mqtt.Token var _ mqtt.Token = &fakeToken{} -func (t *fakeToken) Wait() bool { +func (*fakeToken) Wait() bool { return true } -func (t *fakeToken) WaitTimeout(time.Duration) bool { +func (*fakeToken) WaitTimeout(time.Duration) bool { return true } -func (t *fakeToken) Error() error { +func (*fakeToken) Error() error { return nil } @@ -166,7 +166,7 @@ type message struct { qos byte } -func (m *message) Duplicate() bool { +func (*message) Duplicate() bool { panic("not implemented") } @@ -174,7 +174,7 @@ func (m *message) Qos() byte { return m.qos } -func (m *message) Retained() bool { +func (*message) Retained() bool { panic("not implemented") } @@ -182,15 +182,15 @@ func (m *message) Topic() string { return m.topic } -func (m *message) MessageID() uint16 { +func (*message) MessageID() uint16 { panic("not implemented") } -func (m *message) Payload() []byte { +func (*message) Payload() []byte { return []byte("cpu time_idle=42i") } -func (m *message) Ack() { +func (*message) Ack() { panic("not implemented") } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index fb37dfba571cf..174b2ea3c10a8 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -461,7 +461,7 @@ func (m *Mysql) gatherServer(server *config.Secret, acc telegraf.Accumulator) er } if m.GatherBinaryLogs { - err = m.gatherBinaryLogs(db, servtag, acc) + err = gatherBinaryLogs(db, servtag, acc) if err != nil { return err } @@ -510,35 +510,35 @@ func (m *Mysql) gatherServer(server *config.Secret, acc telegraf.Accumulator) er } if m.GatherTableIOWaits { - err = m.gatherPerfTableIOWaits(db, servtag, acc) + err = gatherPerfTableIOWaits(db, servtag, acc) if err != nil { return err } } if m.GatherIndexIOWaits { - err = m.gatherPerfIndexIOWaits(db, servtag, 
acc) + err = gatherPerfIndexIOWaits(db, servtag, acc) if err != nil { return err } } if m.GatherTableLockWaits { - err = m.gatherPerfTableLockWaits(db, servtag, acc) + err = gatherPerfTableLockWaits(db, servtag, acc) if err != nil { return err } } if m.GatherEventWaits { - err = m.gatherPerfEventWaits(db, servtag, acc) + err = gatherPerfEventWaits(db, servtag, acc) if err != nil { return err } } if m.GatherFileEventsStats { - err = m.gatherPerfFileEventsStatuses(db, servtag, acc) + err = gatherPerfFileEventsStatuses(db, servtag, acc) if err != nil { return err } @@ -712,7 +712,7 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, servtag string, acc telegraf.Acc // gatherBinaryLogs can be used to collect size and count of all binary files // binlogs metric requires the MySQL server to turn it on in configuration -func (m *Mysql) gatherBinaryLogs(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +func gatherBinaryLogs(db *sql.DB, servtag string, acc telegraf.Accumulator) error { // run query rows, err := db.Query(binaryLogsQuery) if err != nil { @@ -1174,9 +1174,8 @@ func getColSlice(rows *sql.Rows) ([]interface{}, error) { return nil, fmt.Errorf("not Supported - %d columns", l) } -// gatherPerfTableIOWaits can be used to get total count and time -// of I/O wait event for each table and process -func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +// gatherPerfTableIOWaits can be used to get total count and time of I/O wait event for each table and process +func gatherPerfTableIOWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { rows, err := db.Query(perfTableIOWaitsQuery) if err != nil { return err @@ -1221,9 +1220,8 @@ func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, servtag string, acc telegraf. 
return nil } -// gatherPerfIndexIOWaits can be used to get total count and time -// of I/O wait event for each index and process -func (m *Mysql) gatherPerfIndexIOWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +// gatherPerfIndexIOWaits can be used to get total count and time of I/O wait event for each index and process +func gatherPerfIndexIOWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { rows, err := db.Query(perfIndexIOWaitsQuery) if err != nil { return err @@ -1500,7 +1498,7 @@ func (m *Mysql) gatherPerfSummaryPerAccountPerEvent(db *sql.DB, servtag string, // the total number and time for SQL and external lock wait events // for each table and operation // requires the MySQL server to be enabled to save this metric -func (m *Mysql) gatherPerfTableLockWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +func gatherPerfTableLockWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { // check if table exists, // if performance_schema is not enabled, tables do not exist // then there is no need to scan them @@ -1627,7 +1625,7 @@ func (m *Mysql) gatherPerfTableLockWaits(db *sql.DB, servtag string, acc telegra } // gatherPerfEventWaits can be used to get total time and number of event waits -func (m *Mysql) gatherPerfEventWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +func gatherPerfEventWaits(db *sql.DB, servtag string, acc telegraf.Accumulator) error { rows, err := db.Query(perfEventWaitsQuery) if err != nil { return err @@ -1658,7 +1656,7 @@ func (m *Mysql) gatherPerfEventWaits(db *sql.DB, servtag string, acc telegraf.Ac } // gatherPerfFileEvents can be used to get stats on file events -func (m *Mysql) gatherPerfFileEventsStatuses(db *sql.DB, servtag string, acc telegraf.Accumulator) error { +func gatherPerfFileEventsStatuses(db *sql.DB, servtag string, acc telegraf.Accumulator) error { rows, err := db.Query(perfFileEventsQuery) if err != nil { return err diff --git 
a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 7904800499d89..43531cc53e912 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -186,7 +186,7 @@ func (n *NatsConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (n *NatsConsumer) Gather(_ telegraf.Accumulator) error { +func (*NatsConsumer) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index d5485959177c7..97e02652419bf 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -83,12 +83,12 @@ func (n *NeptuneApex) gatherServer( if err != nil { return err } - return n.parseXML(acc, resp) + return parseXML(acc, resp) } // parseXML is strict on the input and does not do best-effort parsing. // This is because of the life-support nature of the Neptune Apex. 
-func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error { +func parseXML(acc telegraf.Accumulator, data []byte) error { r := xmlReply{} err := xml.Unmarshal(data, &r) if err != nil { diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index a64374cd22bde..a6f65ec96ec81 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -57,9 +57,7 @@ func TestGather(t *testing.T) { } func TestParseXML(t *testing.T) { - n := &NeptuneApex{} - goodTime := time.Date(2018, 12, 22, 21, 55, 37, 0, - time.FixedZone("PST", 3600*-8)) + goodTime := time.Date(2018, 12, 22, 21, 55, 37, 0, time.FixedZone("PST", 3600*-8)) tests := []struct { name string xmlResponse []byte @@ -363,7 +361,7 @@ func TestParseXML(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { var acc testutil.Accumulator - err := n.parseXML(&acc, test.xmlResponse) + err := parseXML(&acc, test.xmlResponse) if test.wantErr { require.Error(t, err, "expected error but got ") return diff --git a/plugins/inputs/netflow/netflow.go b/plugins/inputs/netflow/netflow.go index 218d9b5296dba..162a6d82db6a2 100644 --- a/plugins/inputs/netflow/netflow.go +++ b/plugins/inputs/netflow/netflow.go @@ -114,7 +114,7 @@ func (n *NetFlow) Start(acc telegraf.Accumulator) error { return nil } -func (n *NetFlow) Gather(_ telegraf.Accumulator) error { +func (*NetFlow) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/netflow/netflow_v5.go b/plugins/inputs/netflow/netflow_v5.go index 839a1d0943598..dadf3df0f2c80 100644 --- a/plugins/inputs/netflow/netflow_v5.go +++ b/plugins/inputs/netflow/netflow_v5.go @@ -15,14 +15,14 @@ import ( // Decoder structure type netflowv5Decoder struct{} -func (d *netflowv5Decoder) init() error { +func (*netflowv5Decoder) init() error { if err := initL4ProtoMapping(); err != nil { return fmt.Errorf("initializing layer 4 
protocol mapping failed: %w", err) } return nil } -func (d *netflowv5Decoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { +func (*netflowv5Decoder) decode(srcIP net.IP, payload []byte) ([]telegraf.Metric, error) { src := srcIP.String() // Decode the message diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index c1d02e5cae9f9..68edaa0cacd32 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -160,7 +160,7 @@ func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator te fields := map[string]interface{}{ "status": server.Status, - "status_code": check.getStatusCode(server.Status), + "status_code": getStatusCode(server.Status), "rise": server.Rise, "fall": server.Fall, } @@ -171,7 +171,7 @@ func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator te return nil } -func (check *NginxUpstreamCheck) getStatusCode(status string) uint8 { +func getStatusCode(status string) uint8 { switch status { case "up": return 1 diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 69f2a0aea73a1..1516e4f2a1417 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -138,7 +138,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { return nil } -func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { +func (*NSQConsumer) Gather(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 695b8c6f601ee..e4714b0ff37f8 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -51,7 +51,7 @@ func (smi *NvidiaSMI) Start(telegraf.Accumulator) error { return nil } -func (smi *NvidiaSMI) Stop() {} +func (*NvidiaSMI) 
Stop() {} // Gather implements the telegraf interface func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/opentelemetry/writer.go b/plugins/inputs/opentelemetry/writer.go index b7701678edc17..6af00b6eb4af1 100644 --- a/plugins/inputs/opentelemetry/writer.go +++ b/plugins/inputs/opentelemetry/writer.go @@ -49,6 +49,6 @@ func (w *writeToAccumulator) EnqueuePoint( return nil } -func (w *writeToAccumulator) WriteBatch(_ context.Context) error { +func (*writeToAccumulator) WriteBatch(context.Context) error { return nil } diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index 20709aaf750d9..1e7eb4a63aab7 100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -104,7 +104,7 @@ func (pf *PF) Gather(acc telegraf.Accumulator) error { return nil } - if perr := pf.parsePfctlOutput(o, acc); perr != nil { + if perr := parsePfctlOutput(o, acc); perr != nil { acc.AddError(perr) } return nil @@ -114,7 +114,7 @@ func errMissingData(tag string) error { return fmt.Errorf("struct data for tag %q not found in %s output", tag, pfctlCommand) } -func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error { +func parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error { fields := make(map[string]interface{}) scanner := bufio.NewScanner(strings.NewReader(pfoutput)) for scanner.Scan() { diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index d039685bb05f8..73f14cb776af9 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -72,7 +72,7 @@ type nilCloser struct { io.ReadWriter } -func (c *nilCloser) Close() error { return nil } +func (*nilCloser) Close() error { return nil } func TestStreams(t *testing.T) { var rec record @@ -125,11 +125,11 @@ func (c *writeOnlyConn) Write(p []byte) (int, error) { return len(p), nil } -func (c *writeOnlyConn) Read(_ []byte) (int, error) { +func (*writeOnlyConn) Read([]byte) (int, error) { 
return 0, errors.New("conn is write-only") } -func (c *writeOnlyConn) Close() error { +func (*writeOnlyConn) Close() error { return nil } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 802c761532ccc..d267b57ca2f28 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -31,7 +31,7 @@ import ( type statServer struct{} // We create a fake server to return test data -func (s statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) { +func (statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Length", strconv.Itoa(len(outputSample))) fmt.Fprint(w, outputSample) diff --git a/plugins/inputs/powerdns/powerdns_linux_test.go b/plugins/inputs/powerdns/powerdns_linux_test.go index 772bee4c4d46d..5bb576759a9f4 100644 --- a/plugins/inputs/powerdns/powerdns_linux_test.go +++ b/plugins/inputs/powerdns/powerdns_linux_test.go @@ -13,9 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -type statServer struct{} - -func (s statServer) serverSocket(l net.Listener) { +func serverSocket(l net.Listener) { for { conn, err := l.Accept() if err != nil { @@ -46,8 +44,7 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) { defer socket.Close() - s := statServer{} - go s.serverSocket(socket) + go serverSocket(socket) p := &Powerdns{ UnixSockets: []string{sockname}, diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index 48a77518f5a6a..3fd8e19c55a6c 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -53,7 +53,7 @@ func (p *PowerdnsRecursor) Init() error { case 2: p.gatherFromServer = p.gatherFromV2Server case 3: - p.gatherFromServer = p.gatherFromV3Server + p.gatherFromServer = gatherFromV3Server default: return fmt.Errorf("unknown control protocol version '%d', allowed 
values are 1, 2, 3", p.ControlProtocolVersion) } diff --git a/plugins/inputs/powerdns_recursor/protocol_v3.go b/plugins/inputs/powerdns_recursor/protocol_v3.go index b6e04e5ea58bb..9dbc9bd776fe4 100644 --- a/plugins/inputs/powerdns_recursor/protocol_v3.go +++ b/plugins/inputs/powerdns_recursor/protocol_v3.go @@ -16,7 +16,7 @@ import ( // status: uint32 // dataLength: size_t // data: byte[dataLength] -func (p *PowerdnsRecursor) gatherFromV3Server(address string, acc telegraf.Accumulator) error { +func gatherFromV3Server(address string, acc telegraf.Accumulator) error { conn, err := net.Dial("unix", address) if err != nil { return err diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 192a431acd503..976cc79636ef7 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -14,7 +14,7 @@ import ( type NativeFinder struct{} // Uid will return all pids for the given user -func (pg *NativeFinder) uid(user string) ([]pid, error) { +func (*NativeFinder) uid(user string) ([]pid, error) { var dst []pid procs, err := gopsprocess.Processes() if err != nil { @@ -34,7 +34,7 @@ func (pg *NativeFinder) uid(user string) ([]pid, error) { } // PidFile returns the pid from the pid file given. 
-func (pg *NativeFinder) pidFile(path string) ([]pid, error) { +func (*NativeFinder) pidFile(path string) ([]pid, error) { var pids []pid pidString, err := os.ReadFile(path) if err != nil { @@ -49,13 +49,13 @@ func (pg *NativeFinder) pidFile(path string) ([]pid, error) { } // FullPattern matches on the command line when the process was executed -func (pg *NativeFinder) fullPattern(pattern string) ([]pid, error) { +func (*NativeFinder) fullPattern(pattern string) ([]pid, error) { var pids []pid regxPattern, err := regexp.Compile(pattern) if err != nil { return pids, err } - procs, err := pg.fastProcessList() + procs, err := fastProcessList() if err != nil { return pids, err } @@ -73,7 +73,7 @@ func (pg *NativeFinder) fullPattern(pattern string) ([]pid, error) { } // Children matches children pids on the command line when the process was executed -func (pg *NativeFinder) children(processID pid) ([]pid, error) { +func (*NativeFinder) children(processID pid) ([]pid, error) { // Get all running processes p, err := gopsprocess.NewProcess(int32(processID)) if err != nil { @@ -93,7 +93,7 @@ func (pg *NativeFinder) children(processID pid) ([]pid, error) { return pids, err } -func (pg *NativeFinder) fastProcessList() ([]*gopsprocess.Process, error) { +func fastProcessList() ([]*gopsprocess.Process, error) { pids, err := gopsprocess.Pids() if err != nil { return nil, err @@ -107,13 +107,13 @@ func (pg *NativeFinder) fastProcessList() ([]*gopsprocess.Process, error) { } // Pattern matches on the process name -func (pg *NativeFinder) pattern(pattern string) ([]pid, error) { +func (*NativeFinder) pattern(pattern string) ([]pid, error) { var pids []pid regxPattern, err := regexp.Compile(pattern) if err != nil { return pids, err } - procs, err := pg.fastProcessList() + procs, err := fastProcessList() if err != nil { return pids, err } diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index add3a2dfb120d..8e61fff4449e6 100644 --- 
a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -23,7 +23,7 @@ func newPgrepFinder() (pidFinder, error) { return &pgrep{path}, nil } -func (pg *pgrep) pidFile(path string) ([]pid, error) { +func (*pgrep) pidFile(path string) ([]pid, error) { var pids []pid pidString, err := os.ReadFile(path) if err != nil { diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 6bf1e8402dc69..ecc8a978105be 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -617,7 +617,7 @@ func (p *Procstat) cgroupPIDs() ([]pidsTags, error) { pidTags := make([]pidsTags, 0, len(items)) for _, item := range items { - pids, err := p.singleCgroupPIDs(item) + pids, err := singleCgroupPIDs(item) if err != nil { return nil, err } @@ -628,7 +628,7 @@ func (p *Procstat) cgroupPIDs() ([]pidsTags, error) { return pidTags, nil } -func (p *Procstat) singleCgroupPIDs(path string) ([]pid, error) { +func singleCgroupPIDs(path string) ([]pid, error) { ok, err := isDir(path) if err != nil { return nil, err diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 4256f08e24234..85282ffb5df46 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -126,7 +126,7 @@ func (p *testProc) pid() pid { return p.procID } -func (p *testProc) Name() (string, error) { +func (*testProc) Name() (string, error) { return "test_proc", nil } @@ -134,7 +134,7 @@ func (p *testProc) setTag(k, v string) { p.tags[k] = v } -func (p *testProc) MemoryMaps(bool) (*[]gopsprocess.MemoryMapsStat, error) { +func (*testProc) MemoryMaps(bool) (*[]gopsprocess.MemoryMapsStat, error) { stats := make([]gopsprocess.MemoryMapsStat, 0) return &stats, nil } diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 2c4ef136c18ca..f1b303ecdb977 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ 
b/plugins/inputs/prometheus/kubernetes.go @@ -419,7 +419,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { tags[k] = v } } - podURL := p.addressToURL(targetURL, targetURL.Hostname()) + podURL := addressToURL(targetURL, targetURL.Hostname()) // Locks earlier if using cAdvisor calls - makes a new list each time // rather than updating and removing from the same list diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 8b557a9cab979..85d2de1f41cba 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -338,7 +338,7 @@ func (p *Prometheus) initFilters() error { return nil } -func (p *Prometheus) addressToURL(u *url.URL, address string) *url.URL { +func addressToURL(u *url.URL, address string) *url.URL { host := address if u.Port() != "" { host = address + ":" + u.Port() @@ -393,7 +393,7 @@ func (p *Prometheus) getAllURLs() (map[string]urlAndAddress, error) { continue } for _, resolved := range resolvedAddresses { - serviceURL := p.addressToURL(address, resolved) + serviceURL := addressToURL(address, resolved) allURLs[serviceURL.String()] = urlAndAddress{ url: serviceURL, address: resolved, diff --git a/plugins/inputs/radius/radius.go b/plugins/inputs/radius/radius.go index 984f31d93c2a9..efb71a1df9f2e 100644 --- a/plugins/inputs/radius/radius.go +++ b/plugins/inputs/radius/radius.go @@ -32,7 +32,7 @@ type Radius struct { //go:embed sample.conf var sampleConfig string -func (r *Radius) SampleConfig() string { +func (*Radius) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 762d2af810ef3..de2c5a82bc458 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -89,7 +89,7 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { if err != nil { return err } - tags := r.getTags(addr) + tags := getTags(addr) fields := 
map[string]interface{}{ "calling": calling, "writing": writing, @@ -153,7 +153,7 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } // Get tag(s) for the raindrops calling/writing plugin -func (r *Raindrops) getTags(addr *url.URL) map[string]string { +func getTags(addr *url.URL) map[string]string { h := addr.Host host, port, err := net.SplitHostPort(h) if err != nil { diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index 82def94f1484e..ac3c8692e96bb 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -35,11 +35,10 @@ writing: 200 // Verify that raindrops tags are properly parsed based on the server func TestRaindropsTags(t *testing.T) { urls := []string{"http://localhost/_raindrops", "http://localhost:80/_raindrops"} - r := &Raindrops{} for _, url1 := range urls { addr, err := url.Parse(url1) require.NoError(t, err) - tagMap := r.getTags(addr) + tagMap := getTags(addr) require.Contains(t, tagMap["server"], "localhost") } } diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 4f31f6dda18b5..e0a19f3f18760 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -315,7 +315,7 @@ func (r *Redis) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(client Client) { defer wg.Done() - acc.AddError(r.gatherServer(client, acc)) + acc.AddError(gatherServer(client, acc)) acc.AddError(r.gatherCommandValues(client, acc)) }(client) } @@ -344,7 +344,7 @@ func (r *Redis) gatherCommandValues(client Client, acc telegraf.Accumulator) err return nil } -func (r *Redis) gatherServer(client Client, acc telegraf.Accumulator) error { +func gatherServer(client Client, acc telegraf.Accumulator) error { info, err := client.Info().Result() if err != nil { return err @@ -774,7 +774,7 @@ func coerceType(value interface{}, typ reflect.Type) reflect.Value { return reflect.ValueOf(value) } -func (r 
*Redis) Start(telegraf.Accumulator) error { +func (*Redis) Start(telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index 0e96c49c358fe..f8f0d5b540f4d 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -17,19 +17,19 @@ import ( type testClient struct{} -func (t *testClient) BaseTags() map[string]string { +func (*testClient) BaseTags() map[string]string { return map[string]string{"host": "redis.net"} } -func (t *testClient) Info() *redis.StringCmd { +func (*testClient) Info() *redis.StringCmd { return nil } -func (t *testClient) Do(_ string, _ ...interface{}) (interface{}, error) { +func (*testClient) Do(string, ...interface{}) (interface{}, error) { return 2, nil } -func (t *testClient) Close() error { +func (*testClient) Close() error { return nil } diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index 79c42f583b1c2..2daf19312b4a2 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -30,7 +30,7 @@ func (*RethinkDB) SampleConfig() string { // Returns one of the errors encountered while gather stats (if any). 
func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { if len(r.Servers) == 0 { - return r.gatherServer(localhost, acc) + return gatherServer(localhost, acc) } var wg sync.WaitGroup @@ -47,7 +47,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func() { defer wg.Done() - acc.AddError(r.gatherServer(&Server{URL: u}, acc)) + acc.AddError(gatherServer(&Server{URL: u}, acc)) }() } @@ -56,7 +56,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { return nil } -func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error { +func gatherServer(server *Server, acc telegraf.Accumulator) error { var err error connectOpts := gorethink.ConnectOpts{ Address: server.URL.Host, diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 526b7fda67fd9..e269f1bea8417 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -275,7 +275,7 @@ func (*RiemannSocketListener) SampleConfig() string { return sampleConfig } -func (rsl *RiemannSocketListener) Gather(_ telegraf.Accumulator) error { +func (*RiemannSocketListener) Gather(telegraf.Accumulator) error { return nil } From edefa495deb43aaac7668750853f15489b8af919 Mon Sep 17 00:00:00 2001 From: wenweihuang Date: Thu, 19 Dec 2024 15:46:55 +0800 Subject: [PATCH 170/170] Merge branch b16210 --- go.sum | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/go.sum b/go.sum index 8f152edf1bd6c..eecd1445c8d47 100644 --- a/go.sum +++ b/go.sum @@ -2716,10 +2716,8 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.25.0/go.mod 
h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=