diff --git a/.golangci.yml b/.golangci.yml index a0165ead708dd..9821917770265 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -267,6 +267,8 @@ linters-settings: exclude: [ "TEST" ] - name: enforce-repeated-arg-type-style arguments: ["short"] + - name: enforce-slice-style + arguments: ["make"] - name: error-naming - name: error-return - name: error-strings @@ -340,6 +342,7 @@ linters-settings: - expected-actual - float-compare - formatter + - go-require - len - negative-positive - nil-compare diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index 3d660e8657724..6e5dc66c5daac 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -127,7 +127,7 @@ func TestSetPrecision(t *testing.T) { func TestAddTrackingMetricGroupEmpty(t *testing.T) { ch := make(chan telegraf.Metric, 10) - metrics := []telegraf.Metric{} + metrics := make([]telegraf.Metric, 0) acc := NewAccumulator(&TestMetricMaker{}, ch).WithTracking(1) id := acc.AddTrackingMetricGroup(metrics) diff --git a/agent/agent.go b/agent/agent.go index 461779705d534..6ebc4fcdc6a1a 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -106,6 +106,11 @@ func (a *Agent) Run(ctx context.Context) error { time.Duration(a.Config.Agent.Interval), a.Config.Agent.Quiet, a.Config.Agent.Hostname, time.Duration(a.Config.Agent.FlushInterval)) + log.Printf("D! [agent] Initializing plugins") + if err := a.InitPlugins(); err != nil { + return err + } + if a.Config.Persister != nil { log.Printf("D! [agent] Initializing plugin states") if err := a.initPersister(); err != nil { @@ -119,11 +124,6 @@ func (a *Agent) Run(ctx context.Context) error { } } - log.Printf("D! [agent] Initializing plugins") - if err := a.InitPlugins(); err != nil { - return err - } - startTime := time.Now() log.Printf("D! 
[agent] Connecting outputs") diff --git a/agent/agent_test.go b/agent/agent_test.go index 849b491b766eb..9741dfe93f151 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -83,7 +83,6 @@ func TestAgent_LoadOutput(t *testing.T) { require.Len(t, a.Config.Outputs, 1) c = config.NewConfig() - c.OutputFilters = []string{} err = c.LoadConfig("../config/testdata/telegraf-agent.toml") require.NoError(t, err) a = NewAgent(c) diff --git a/agent/tick_test.go b/agent/tick_test.go index 001ac85d954f2..078d50be92b57 100644 --- a/agent/tick_test.go +++ b/agent/tick_test.go @@ -37,8 +37,7 @@ func TestAlignedTicker(t *testing.T) { time.Unix(60, 0).UTC(), } - actual := []time.Time{} - + actual := make([]time.Time, 0) clk.Add(10 * time.Second) for !clk.Now().After(until) { tm := <-ticker.Elapsed() @@ -109,8 +108,7 @@ func TestAlignedTickerOffset(t *testing.T) { time.Unix(53, 0).UTC(), } - actual := []time.Time{} - + actual := make([]time.Time, 0) clk.Add(10*time.Second + offset) for !clk.Now().After(until) { tm := <-ticker.Elapsed() @@ -174,7 +172,7 @@ func TestUnalignedTicker(t *testing.T) { time.Unix(61, 0).UTC(), } - actual := []time.Time{} + actual := make([]time.Time, 0) for !clk.Now().After(until) { select { case tm := <-ticker.Elapsed(): @@ -215,7 +213,7 @@ func TestRollingTicker(t *testing.T) { time.Unix(61, 0).UTC(), } - actual := []time.Time{} + actual := make([]time.Time, 0) for !clk.Now().After(until) { select { case tm := <-ticker.Elapsed(): diff --git a/cmd/telegraf/cmd_win_service_notwindows.go b/cmd/telegraf/cmd_win_service_notwindows.go index ebf58cd514508..258d5c95d3826 100644 --- a/cmd/telegraf/cmd_win_service_notwindows.go +++ b/cmd/telegraf/cmd_win_service_notwindows.go @@ -9,7 +9,7 @@ import ( ) func cliFlags() []cli.Flag { - return []cli.Flag{} + return make([]cli.Flag, 0) } func getServiceCommands(io.Writer) []*cli.Command { diff --git a/cmd/telegraf/printer.go b/cmd/telegraf/printer.go index 02428125cff87..de92d6f5be6c4 100644 --- 
a/cmd/telegraf/printer.go +++ b/cmd/telegraf/printer.go @@ -24,7 +24,7 @@ var ( inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", "processes", "disk", "diskio"} // Default output plugins - outputDefaults = []string{} + outputDefaults = make([]string, 0) ) var header = `# Telegraf Configuration @@ -126,7 +126,7 @@ func printSampleConfig(outputBuffer io.Writer, filters Filters) { printFilteredSecretstores(secretstoreFilters, false, outputBuffer) } else { fmt.Print(secretstoreHeader) - snames := []string{} + snames := make([]string, 0, len(secretstores.SecretStores)) for sname := range secretstores.SecretStores { snames = append(snames, sname) } @@ -165,7 +165,7 @@ func printSampleConfig(outputBuffer io.Writer, filters Filters) { printFilteredProcessors(processorFilters, false, outputBuffer) } else { outputBuffer.Write([]byte(processorHeader)) - pnames := []string{} + pnames := make([]string, 0, len(processors.Processors)) for pname := range processors.Processors { pnames = append(pnames, pname) } @@ -182,7 +182,7 @@ func printSampleConfig(outputBuffer io.Writer, filters Filters) { printFilteredAggregators(aggregatorFilters, false, outputBuffer) } else { outputBuffer.Write([]byte(aggregatorHeader)) - pnames := []string{} + pnames := make([]string, 0, len(aggregators.Aggregators)) for pname := range aggregators.Aggregators { pnames = append(pnames, pname) } @@ -261,7 +261,7 @@ func printFilteredInputs(inputFilters []string, commented bool, outputBuffer io. 
// cache service inputs to print them at the end servInputs := make(map[string]telegraf.ServiceInput) // for alphabetical looping: - servInputNames := []string{} + servInputNames := make([]string, 0, len(pnames)) // Print Inputs for _, pname := range pnames { diff --git a/config/config.go b/config/config.go index 6cf52b9aa0f67..d80033a6050f1 100644 --- a/config/config.go +++ b/config/config.go @@ -434,7 +434,7 @@ func GetDefaultConfigPath() ([]string, error) { // At this point we need to check if the files under /etc/telegraf are // populated and return them all. - confFiles := []string{} + confFiles := make([]string, 0) if _, err := os.Stat(etcfile); err == nil { confFiles = append(confFiles, etcfile) } @@ -1805,7 +1805,7 @@ func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string) []models.Ta } func keys(m map[string]bool) []string { - result := []string{} + result := make([]string, 0, len(m)) for k := range m { result = append(result, k) } diff --git a/config/config_test.go b/config/config_test.go index 9a50f2cabe348..7af2a790a5772 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -479,8 +479,6 @@ func TestConfig_InlineTables(t *testing.T) { } func TestConfig_SliceComment(t *testing.T) { - t.Skipf("Skipping until #3642 is resolved") - c := config.NewConfig() require.NoError(t, c.LoadConfig("./testdata/slice_comment.toml")) require.Len(t, c.Outputs, 1) @@ -520,8 +518,11 @@ func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { func TestGetDefaultConfigPathFromEnvURL(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) - _, err := w.Write([]byte("[agent]\ndebug = true")) - require.NoError(t, err) + if _, err := w.Write([]byte("[agent]\ndebug = true")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -1575,7 +1576,6 @@ func (m *MockupStatePlugin) Init() error { } m.state = MockupState{ 
Name: "mockup", - Bits: []int{}, Modified: t0, } diff --git a/config/secret_test.go b/config/secret_test.go index 90370e9323a10..6422c5a97d0e5 100644 --- a/config/secret_test.go +++ b/config/secret_test.go @@ -67,7 +67,7 @@ func TestGettingMissingResolver(t *testing.T) { mysecret := "a @{referenced:secret}" s := NewSecret([]byte(mysecret)) defer s.Destroy() - s.unlinked = []string{} + s.unlinked = make([]string, 0) s.resolvers = map[string]telegraf.ResolveFunc{ "@{a:dummy}": func() ([]byte, bool, error) { return nil, false, nil @@ -82,7 +82,7 @@ func TestGettingResolverError(t *testing.T) { mysecret := "a @{referenced:secret}" s := NewSecret([]byte(mysecret)) defer s.Destroy() - s.unlinked = []string{} + s.unlinked = make([]string, 0) s.resolvers = map[string]telegraf.ResolveFunc{ "@{referenced:secret}": func() ([]byte, bool, error) { return nil, false, errors.New("broken") @@ -111,7 +111,7 @@ func TestEnclaveOpenError(t *testing.T) { err := s.Link(map[string]telegraf.ResolveFunc{}) require.ErrorContains(t, err, "opening enclave failed") - s.unlinked = []string{} + s.unlinked = make([]string, 0) _, err = s.Get() require.ErrorContains(t, err, "opening enclave failed") } diff --git a/docs/specs/tsd-003-state-persistence.md b/docs/specs/tsd-003-state-persistence.md index 15470ab4f2335..5a482f737e0bb 100644 --- a/docs/specs/tsd-003-state-persistence.md +++ b/docs/specs/tsd-003-state-persistence.md @@ -31,15 +31,17 @@ It is intended to The persistence will use the following steps: +- Compute an unique ID for each of the plugin _instances_ +- Startup Telegraf plugins calling `Init()`, etc. - Initialize persistence framework with the user specified `statefile` location and load the state if present - Determine all stateful plugin instances by fulfilling the `StatefulPlugin` interface -- Compute an unique ID for each of the plugin _instances_ - Restore plugin states (if any) for each plugin ID present in the state-file -- Startup Telegraf plugins calling `Init()`, etc. 
- Run data-collection etc... -- On shutdown, query the state of all registered stateful plugins state +- On shutdown, stopping all Telegraf plugins calling `Stop()` or `Close()` + depending on the plugin type +- Query the state of all registered stateful plugins state - Create an overall state-map with the plugin instance ID as a key and the serialized plugin state as value. - Marshal the overall state-map and store to disk @@ -85,7 +87,7 @@ for the overall state. On-disk, the overall state of Telegraf is stored as JSON. To restore the state of a plugin, the overall Telegraf state is first deserialized from the on-disk JSON data and a lookup for the plugin ID is performed in the resulting map. The value, if found, is then deserialized to the -plugin's state data-structure and provided to the plugin before calling `Init()`. +plugin's state data-structure and provided to the plugin after calling `Init()`. ## Is / Is-not diff --git a/filter/filter_test.go b/filter/filter_test.go index 05a78bd4aa46e..8842d2a74a6e5 100644 --- a/filter/filter_test.go +++ b/filter/filter_test.go @@ -7,7 +7,7 @@ import ( ) func TestCompile(t *testing.T) { - f, err := Compile([]string{}) + f, err := Compile(nil) require.NoError(t, err) require.Nil(t, f) @@ -50,10 +50,10 @@ func TestCompile(t *testing.T) { } func TestIncludeExclude(t *testing.T) { - tags := []string{} labels := []string{"best", "com_influxdata", "timeseries", "com_influxdata_telegraf", "ever"} + tags := make([]string, 0, len(labels)) - filter, err := NewIncludeExcludeFilter([]string{}, []string{"com_influx*"}) + filter, err := NewIncludeExcludeFilter(nil, []string{"com_influx*"}) if err != nil { t.Fatalf("Failed to create include/exclude filter - %v", err) } diff --git a/internal/process/process.go b/internal/process/process.go index faedf21790665..c471060d11b38 100644 --- a/internal/process/process.go +++ b/internal/process/process.go @@ -45,7 +45,7 @@ func New(command, envs []string) (*Process, error) { p := &Process{ 
RestartDelay: 5 * time.Second, name: command[0], - args: []string{}, + args: make([]string, 0), envs: envs, } diff --git a/internal/snmp/field_test.go b/internal/snmp/field_test.go index d149731e4dd48..d2b604d936f53 100644 --- a/internal/snmp/field_test.go +++ b/internal/snmp/field_test.go @@ -181,7 +181,7 @@ func TestConvertHextoint(t *testing.T) { { name: "big endian invalid", conversion: "hextoint:BigEndian:invalid", - ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: []uint8{}}, + ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: make([]uint8, 0)}, errmsg: "invalid bit value", }, { @@ -223,13 +223,13 @@ func TestConvertHextoint(t *testing.T) { { name: "little endian invalid", conversion: "hextoint:LittleEndian:invalid", - ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: []byte{}}, + ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: make([]byte, 0)}, errmsg: "invalid bit value", }, { name: "invalid", conversion: "hextoint:invalid:uint64", - ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: []byte{}}, + ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: make([]byte, 0)}, errmsg: "invalid Endian value", }, } diff --git a/internal/snmp/mib_loader.go b/internal/snmp/mib_loader.go index de7cede858060..d6641767d5791 100644 --- a/internal/snmp/mib_loader.go +++ b/internal/snmp/mib_loader.go @@ -92,8 +92,8 @@ func LoadMibsFromPath(paths []string, log telegraf.Logger, loader MibLoader) err // should walk the paths given and find all folders func walkPaths(paths []string, log telegraf.Logger) ([]string, error) { once.Do(gosmi.Init) - folders := []string{} + folders := make([]string, 0) for _, mibPath := range paths { // Check if we loaded that path already and skip it if so m.Lock() diff --git a/internal/snmp/translator_netsnmp_mocks_test.go b/internal/snmp/translator_netsnmp_mocks_test.go index d74540cfa2da0..72a00faed1da5 100644 --- a/internal/snmp/translator_netsnmp_mocks_test.go +++ b/internal/snmp/translator_netsnmp_mocks_test.go @@ -28,7 +28,7 @@ 
func TestMockExecCommand(_ *testing.T) { var cmd []string //nolint:prealloc // Pre-allocated this slice would break the algorithm for _, arg := range os.Args { if arg == "--" { - cmd = []string{} + cmd = make([]string, 0) continue } if cmd == nil { diff --git a/metric/series_grouper.go b/metric/series_grouper.go index d20470de50c7e..15f0cc5f16d67 100644 --- a/metric/series_grouper.go +++ b/metric/series_grouper.go @@ -23,7 +23,7 @@ import ( func NewSeriesGrouper() *SeriesGrouper { return &SeriesGrouper{ metrics: make(map[uint64]telegraf.Metric), - ordered: []telegraf.Metric{}, + ordered: make([]telegraf.Metric, 0), hashSeed: maphash.MakeSeed(), } } diff --git a/migrations/inputs_httpjson/migration_test.go b/migrations/inputs_httpjson/migration_test.go index fda159bcc8146..9d77d2a03885c 100644 --- a/migrations/inputs_httpjson/migration_test.go +++ b/migrations/inputs_httpjson/migration_test.go @@ -112,8 +112,11 @@ func TestParsing(t *testing.T) { // Start the test-server server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/stats" { - _, err = w.Write(input) - require.NoError(t, err) + if _, err = w.Write(input); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } else { w.WriteHeader(http.StatusNotFound) } diff --git a/models/buffer_disk.go b/models/buffer_disk.go index 09e338224762f..57836dbab9070 100644 --- a/models/buffer_disk.go +++ b/models/buffer_disk.go @@ -127,7 +127,7 @@ func (b *DiskBuffer) Batch(batchSize int) []telegraf.Metric { if b.length() == 0 { // no metrics in the wal file, so return an empty array - return []telegraf.Metric{} + return make([]telegraf.Metric, 0) } b.batchFirst = b.readIndex() var metrics []telegraf.Metric diff --git a/models/filter.go b/models/filter.go index 34ce0b33b314e..fe05684b10496 100644 --- a/models/filter.go +++ b/models/filter.go @@ -206,7 +206,7 @@ func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool { // filterFields 
removes fields according to fieldinclude/fieldexclude. func (f *Filter) filterFields(metric telegraf.Metric) { - filterKeys := []string{} + filterKeys := make([]string, 0, len(metric.FieldList())) for _, field := range metric.FieldList() { if !ShouldPassFilters(f.fieldIncludeFilter, f.fieldExcludeFilter, field.Key) { filterKeys = append(filterKeys, field.Key) @@ -220,7 +220,7 @@ func (f *Filter) filterFields(metric telegraf.Metric) { // filterTags removes tags according to taginclude/tagexclude. func (f *Filter) filterTags(metric telegraf.Metric) { - filterKeys := []string{} + filterKeys := make([]string, 0, len(metric.TagList())) for _, tag := range metric.TagList() { if !ShouldPassFilters(f.tagIncludeFilter, f.tagExcludeFilter, tag.Key) { filterKeys = append(filterKeys, tag.Key) diff --git a/models/running_output_test.go b/models/running_output_test.go index 6054cf359a5fb..c045dcf0140f2 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -777,10 +777,6 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error { return errors.New("failed write") } - if m.metrics == nil { - m.metrics = []telegraf.Metric{} - } - m.metrics = append(m.metrics, metrics...) 
return nil } diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 12a2b4233839f..f33bd2bea3d19 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -718,7 +718,7 @@ func TestBasicStatsWithAllStats(t *testing.T) { // Test that if an empty array is passed, no points are pushed func TestBasicStatsWithNoStats(t *testing.T) { aggregator := NewBasicStats() - aggregator.Stats = []string{} + aggregator.Stats = make([]string, 0) aggregator.Log = testutil.Logger{} aggregator.initConfiguredStats() diff --git a/plugins/aggregators/histogram/histogram.go b/plugins/aggregators/histogram/histogram.go index 32150fc0c3daa..66230f097f853 100644 --- a/plugins/aggregators/histogram/histogram.go +++ b/plugins/aggregators/histogram/histogram.go @@ -139,9 +139,8 @@ func (h *HistogramAggregator) Add(in telegraf.Metric) { // Push returns histogram values for metrics func (h *HistogramAggregator) Push(acc telegraf.Accumulator) { - metricsWithGroupedFields := []groupedByCountFields{} now := timeNow() - + metricsWithGroupedFields := make([]groupedByCountFields, 0) for id, aggregate := range h.cache { if h.ExpirationInterval != 0 && now.After(aggregate.expireTime) { delete(h.cache, id) diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index d801483eae0a0..afdc877a035d4 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -63,7 +63,11 @@ func newFakeServer(t *testing.T) fakeServer { authed() case authEndpointWithBody: body, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } if !cmp.Equal([]byte(reqBody), body) { w.WriteHeader(http.StatusUnauthorized) return @@ -89,8 +93,11 @@ func newFakeServer(t *testing.T) fakeServer { w.WriteHeader(http.StatusForbidden) return } - _, err := 
w.Write([]byte("good test response")) - require.NoError(t, err) + if _, err := w.Write([]byte("good test response")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } })), int32: &c, diff --git a/plugins/common/starlark/starlark.go b/plugins/common/starlark/starlark.go index d44e99f0a50a8..0f5be3ad7a63f 100644 --- a/plugins/common/starlark/starlark.go +++ b/plugins/common/starlark/starlark.go @@ -25,6 +25,7 @@ type Common struct { StarlarkLoadFunc func(module string, logger telegraf.Logger) (starlark.StringDict, error) thread *starlark.Thread + builtins starlark.StringDict globals starlark.StringDict functions map[string]*starlark.Function parameters map[string]starlark.Tuple @@ -97,8 +98,9 @@ func (s *Common) SetState(state interface{}) error { return fmt.Errorf("state item %q cannot be set: %w", k, err) } } + s.builtins["state"] = s.state - return nil + return s.InitProgram() } func (s *Common) Init() error { @@ -109,44 +111,48 @@ func (s *Common) Init() error { return errors.New("both source or script cannot be set") } - s.thread = &starlark.Thread{ - Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, - Load: func(_ *starlark.Thread, module string) (starlark.StringDict, error) { - return s.StarlarkLoadFunc(module, s.Log) - }, - } + s.builtins = starlark.StringDict{} + s.builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric) + s.builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy) + s.builtins["catch"] = starlark.NewBuiltin("catch", catch) - builtins := starlark.StringDict{} - builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric) - builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy) - builtins["catch"] = starlark.NewBuiltin("catch", catch) - - if err := s.addConstants(&builtins); err != nil { + if err := s.addConstants(&s.builtins); err != nil { return err } - // Insert the persisted state if any - if s.state != nil { - builtins["state"] = s.state - } - - // Load the 
program. In case of an error we can try to insert the state - // which can be used implicitly e.g. when persisting states - program, err := s.sourceProgram(builtins) - if err != nil { + // Initialize the program + if err := s.InitProgram(); err != nil { // Try again with a declared state. This might be necessary for // state persistence. s.state = starlark.NewDict(0) - builtins["state"] = s.state - p, serr := s.sourceProgram(builtins) - if serr != nil { + s.builtins["state"] = s.state + if serr := s.InitProgram(); serr != nil { return err } - program = p + } + + s.functions = make(map[string]*starlark.Function) + s.parameters = make(map[string]starlark.Tuple) + + return nil +} + +func (s *Common) InitProgram() error { + // Load the program. In case of an error we can try to insert the state + // which can be used implicitly e.g. when persisting states + program, err := s.sourceProgram(s.builtins) + if err != nil { + return err } // Execute source - globals, err := program.Init(s.thread, builtins) + s.thread = &starlark.Thread{ + Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, + Load: func(_ *starlark.Thread, module string) (starlark.StringDict, error) { + return s.StarlarkLoadFunc(module, s.Log) + }, + } + globals, err := program.Init(s.thread, s.builtins) if err != nil { return err } @@ -162,10 +168,8 @@ func (s *Common) Init() error { // metrics. Tasks that require global state will not be possible due to // this, so maybe we should relax this in the future. 
globals.Freeze() - s.globals = globals - s.functions = make(map[string]*starlark.Function) - s.parameters = make(map[string]starlark.Tuple) + return nil } diff --git a/plugins/inputs/dmcache/dmcache_notlinux.go b/plugins/inputs/dmcache/dmcache_notlinux.go index 6d9d01e623771..9039dbc5c7491 100644 --- a/plugins/inputs/dmcache/dmcache_notlinux.go +++ b/plugins/inputs/dmcache/dmcache_notlinux.go @@ -11,5 +11,5 @@ func (*DMCache) Gather(_ telegraf.Accumulator) error { } func dmSetupStatus() ([]string, error) { - return []string{}, nil + return make([]string, 0), nil } diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 95d6ea85161c7..c2d8ac6589781 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -30,10 +30,6 @@ var sampleConfig string var once sync.Once -const ( - defaultWatchMethod = "inotify" -) - var ( offsets = make(map[string]int64) offsetsMutex = new(sync.Mutex) diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 3840755040ecd..f954b7d8e0d3e 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -1,11 +1,10 @@ package tail import ( - "bytes" - "log" "os" "path/filepath" "runtime" + "strings" "testing" "time" @@ -22,14 +21,9 @@ import ( "github.com/influxdata/telegraf/testutil" ) -var ( - testdataDir = getTestdataDir() -) - -func NewInfluxParser() (telegraf.Parser, error) { +func newInfluxParser() (telegraf.Parser, error) { parser := &influx.Parser{} - err := parser.Init() - if err != nil { + if err := parser.Init(); err != nil { return nil, err } return parser, nil @@ -42,8 +36,8 @@ func NewTestTail() *Tail { offsetsCopy[k] = v } offsetsMutex.Unlock() - watchMethod := defaultWatchMethod + watchMethod := "inotify" if runtime.GOOS == "windows" { watchMethod = "poll" } @@ -58,61 +52,49 @@ func NewTestTail() *Tail { } func TestTailBadLine(t *testing.T) { - tmpfile, err := os.CreateTemp("", "") - require.NoError(t, err) - defer 
os.Remove(tmpfile.Name()) + content := ` +cpu mytag= foo usage_idle= 100 +cpu usage_idle=100 +` - _, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n") - require.NoError(t, err) - - // Write good metric so we can detect when processing is complete - _, err = tmpfile.WriteString("cpu usage_idle=100\n") - require.NoError(t, err) + tmpfile := filepath.Join(t.TempDir(), "input.csv") + require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) - require.NoError(t, tmpfile.Close()) - - buf := &bytes.Buffer{} - log.SetOutput(buf) + logger := &testutil.CaptureLogger{} tt := NewTestTail() - tt.Log = testutil.Logger{} + tt.Log = logger tt.FromBeginning = true - tt.Files = []string{tmpfile.Name()} - tt.SetParserFunc(NewInfluxParser) - - err = tt.Init() - require.NoError(t, err) + tt.Files = []string{tmpfile} + tt.SetParserFunc(newInfluxParser) + require.NoError(t, tt.Init()) - acc := testutil.Accumulator{} + var acc testutil.Accumulator require.NoError(t, tt.Start(&acc)) - require.NoError(t, acc.GatherError(tt.Gather)) acc.Wait(1) tt.Stop() - require.Contains(t, buf.String(), "Malformed log line") + require.Len(t, logger.Errors(), 1) + require.Contains(t, logger.Errors()[0], "Malformed log line") } func TestColoredLine(t *testing.T) { - tmpfile, err := os.CreateTemp("", "") - require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - _, err = tmpfile.WriteString("cpu usage_idle=\033[4A\033[4A100\ncpu2 usage_idle=200\n") - require.NoError(t, err) - require.NoError(t, tmpfile.Close()) + content := "cpu usage_idle=\033[4A\033[4A100\ncpu2 usage_idle=200\n" + + tmpfile := filepath.Join(t.TempDir(), "input.csv") + require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Filters = []string{"ansi_color"} - tt.Files = []string{tmpfile.Name()} - tt.SetParserFunc(NewInfluxParser) + tt.Files = []string{tmpfile} + tt.SetParserFunc(newInfluxParser) + require.NoError(t, tt.Init()) - err = 
tt.Init() - require.NoError(t, err) - - acc := testutil.Accumulator{} + var acc testutil.Accumulator require.NoError(t, tt.Start(&acc)) defer tt.Stop() require.NoError(t, acc.GatherError(tt.Gather)) @@ -129,23 +111,19 @@ func TestColoredLine(t *testing.T) { } func TestTailDosLineEndings(t *testing.T) { - tmpfile, err := os.CreateTemp("", "") - require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") - require.NoError(t, err) - require.NoError(t, tmpfile.Close()) + content := "cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n" + + tmpfile := filepath.Join(t.TempDir(), "input.csv") + require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true - tt.Files = []string{tmpfile.Name()} - tt.SetParserFunc(NewInfluxParser) - - err = tt.Init() - require.NoError(t, err) + tt.Files = []string{tmpfile} + tt.SetParserFunc(newInfluxParser) + require.NoError(t, tt.Init()) - acc := testutil.Accumulator{} + var acc testutil.Accumulator require.NoError(t, tt.Start(&acc)) defer tt.Stop() require.NoError(t, acc.GatherError(tt.Gather)) @@ -169,7 +147,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { tt := NewTail() tt.Log = testutil.Logger{} tt.FromBeginning = true - tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} + tt.Files = []string{filepath.Join("testdata", "test_multiline.log")} tt.MultilineConfig = MultilineConfig{ Pattern: `^[^\[]`, MatchWhichLine: Previous, @@ -177,17 +155,15 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { Timeout: &duration, } tt.SetParserFunc(createGrokParser) + require.NoError(t, tt.Init()) - err = tt.Init() - require.NoError(t, err) - - acc := testutil.Accumulator{} + var acc testutil.Accumulator require.NoError(t, tt.Start(&acc)) defer tt.Stop() acc.Wait(3) - expectedPath := filepath.Join(testdataDir, "test_multiline.log") + expectedPath := 
filepath.Join("testdata", "test_multiline.log") acc.AssertContainsTaggedFields(t, "tail_grok", map[string]interface{}{ "message": "HelloExample: This is debug", @@ -220,6 +196,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) + defer tmpfile.Close() defer os.Remove(tmpfile.Name()) // This seems necessary in order to get the test to read the following lines. @@ -242,11 +219,9 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { Timeout: &duration, } tt.SetParserFunc(createGrokParser) + require.NoError(t, tt.Init()) - err = tt.Init() - require.NoError(t, err) - - acc := testutil.Accumulator{} + var acc testutil.Accumulator require.NoError(t, tt.Start(&acc)) time.Sleep(11 * time.Millisecond) // will force timeout _, err = tmpfile.WriteString("[04/Jun/2016:12:41:48 +0100] INFO HelloExample: This is info\r\n") @@ -287,7 +262,7 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true - tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} + tt.Files = []string{filepath.Join("testdata", "test_multiline.log")} tt.MultilineConfig = MultilineConfig{ Pattern: `^[^\[]`, MatchWhichLine: Previous, @@ -295,11 +270,9 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test Timeout: &duration, } tt.SetParserFunc(createGrokParser) + require.NoError(t, tt.Init()) - err := tt.Init() - require.NoError(t, err) - - acc := testutil.Accumulator{} + var acc testutil.Accumulator require.NoError(t, tt.Start(&acc)) acc.Wait(3) require.Equal(t, uint64(3), acc.NMetrics()) @@ -307,7 +280,7 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test tt.Stop() acc.Wait(4) - expectedPath := filepath.Join(testdataDir, "test_multiline.log") + expectedPath := filepath.Join("testdata", 
"test_multiline.log") acc.AssertContainsTaggedFields(t, "tail_grok", map[string]interface{}{ "message": "HelloExample: This is warn", @@ -322,7 +295,7 @@ func createGrokParser() (telegraf.Parser, error) { parser := &grok.Parser{ Measurement: "tail_grok", Patterns: []string{"%{TEST_LOG_MULTILINE}"}, - CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, + CustomPatternFiles: []string{filepath.Join("testdata", "test-patterns")}, Log: testutil.Logger{}, } err := parser.Init() @@ -331,22 +304,18 @@ func createGrokParser() (telegraf.Parser, error) { // The csv parser should only parse the header line once per file. func TestCSVHeadersParsedOnce(t *testing.T) { - tmpfile, err := os.CreateTemp("", "") - require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - - _, err = tmpfile.WriteString(` + content := ` measurement,time_idle cpu,42 cpu,42 -`) - require.NoError(t, err) - require.NoError(t, tmpfile.Close()) +` + tmpfile := filepath.Join(t.TempDir(), "input.csv") + require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) plugin := NewTestTail() plugin.Log = testutil.Logger{} plugin.FromBeginning = true - plugin.Files = []string{tmpfile.Name()} + plugin.Files = []string{tmpfile} plugin.SetParserFunc(func() (telegraf.Parser, error) { parser := csv.Parser{ MeasurementColumn: "measurement", @@ -356,13 +325,12 @@ cpu,42 err := parser.Init() return &parser, err }) - require.NoError(t, plugin.Init()) expected := []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{ - "path": tmpfile.Name(), + "path": tmpfile, }, map[string]interface{}{ "time_idle": 42, @@ -370,7 +338,7 @@ cpu,42 time.Unix(0, 0)), testutil.MustMetric("cpu", map[string]string{ - "path": tmpfile.Name(), + "path": tmpfile, }, map[string]interface{}{ "time_idle": 42, @@ -383,30 +351,45 @@ cpu,42 defer plugin.Stop() require.NoError(t, plugin.Gather(&acc)) - require.Eventually(t, func() bool { - return acc.NFields() >= len(expected) - }, 3*time.Second, 100*time.Millisecond) + 
require.Eventuallyf(t, func() bool { + return acc.NMetrics() >= uint64(len(expected)) + }, time.Second, 100*time.Millisecond, "Expected %d metrics found %d", len(expected), acc.NMetrics()) testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } func TestCSVMultiHeaderWithSkipRowANDColumn(t *testing.T) { - tmpfile, err := os.CreateTemp("", "") - require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - - _, err = tmpfile.WriteString(`garbage nonsense + content := `garbage nonsense skip,measurement,value row,1,2 skip1,cpu,42 skip2,mem,100 -`) - require.NoError(t, err) - require.NoError(t, tmpfile.Close()) +` + tmpfile := filepath.Join(t.TempDir(), "input.csv") + require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) + + expected := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile, + }, + map[string]interface{}{ + "value2": 42, + }, + time.Unix(0, 0)), + testutil.MustMetric("mem", + map[string]string{ + "path": tmpfile, + }, + map[string]interface{}{ + "value2": 100, + }, + time.Unix(0, 0)), + } plugin := NewTestTail() plugin.Log = testutil.Logger{} plugin.FromBeginning = true - plugin.Files = []string{tmpfile.Name()} + plugin.Files = []string{tmpfile} plugin.SetParserFunc(func() (telegraf.Parser, error) { parser := csv.Parser{ MeasurementColumn: "measurement1", @@ -418,95 +401,72 @@ skip2,mem,100 err := parser.Init() return &parser, err }) + require.NoError(t, plugin.Init()) - err = plugin.Init() - require.NoError(t, err) - - acc := testutil.Accumulator{} - err = plugin.Start(&acc) - require.NoError(t, err) + var acc testutil.Accumulator + require.NoError(t, plugin.Start(&acc)) defer plugin.Stop() - err = plugin.Gather(&acc) - require.NoError(t, err) - acc.Wait(2) + + require.NoError(t, plugin.Gather(&acc)) + require.Eventuallyf(t, func() bool { + return acc.NMetrics() >= uint64(len(expected)) + }, time.Second, 100*time.Millisecond, "Expected %d metrics found %d", len(expected), acc.NMetrics()) 
plugin.Stop() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +// Ensure that the first line can produce multiple metrics (#6138) +func TestMultipleMetricsOnFirstLine(t *testing.T) { + content := ` +[{"time_idle": 42}, {"time_idle": 42}] +` + + tmpfile := filepath.Join(t.TempDir(), "input.csv") + require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) + expected := []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{ - "path": tmpfile.Name(), + "customPathTagMyFile": tmpfile, }, map[string]interface{}{ - "value2": 42, + "time_idle": 42.0, }, time.Unix(0, 0)), - testutil.MustMetric("mem", + testutil.MustMetric("cpu", map[string]string{ - "path": tmpfile.Name(), + "customPathTagMyFile": tmpfile, }, map[string]interface{}{ - "value2": 100, + "time_idle": 42.0, }, time.Unix(0, 0)), } - testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) -} - -// Ensure that the first line can produce multiple metrics (#6138) -func TestMultipleMetricsOnFirstLine(t *testing.T) { - tmpfile, err := os.CreateTemp("", "") - require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - - _, err = tmpfile.WriteString(` -[{"time_idle": 42}, {"time_idle": 42}] -`) - require.NoError(t, err) - require.NoError(t, tmpfile.Close()) plugin := NewTestTail() plugin.Log = testutil.Logger{} plugin.FromBeginning = true - plugin.Files = []string{tmpfile.Name()} + plugin.Files = []string{tmpfile} plugin.PathTag = "customPathTagMyFile" plugin.SetParserFunc(func() (telegraf.Parser, error) { p := &json.Parser{MetricName: "cpu"} err := p.Init() return p, err }) + require.NoError(t, plugin.Init()) - err = plugin.Init() - require.NoError(t, err) - - acc := testutil.Accumulator{} - err = plugin.Start(&acc) - require.NoError(t, err) + var acc testutil.Accumulator + require.NoError(t, plugin.Start(&acc)) defer plugin.Stop() - err = plugin.Gather(&acc) - require.NoError(t, err) - acc.Wait(2) + + require.NoError(t, plugin.Gather(&acc)) + 
require.Eventuallyf(t, func() bool { + return acc.NMetrics() >= uint64(len(expected)) + }, time.Second, 100*time.Millisecond, "Expected %d metrics found %d", len(expected), acc.NMetrics()) plugin.Stop() - expected := []telegraf.Metric{ - testutil.MustMetric("cpu", - map[string]string{ - "customPathTagMyFile": tmpfile.Name(), - }, - map[string]interface{}{ - "time_idle": 42.0, - }, - time.Unix(0, 0)), - testutil.MustMetric("cpu", - map[string]string{ - "customPathTagMyFile": tmpfile.Name(), - }, - map[string]interface{}{ - "time_idle": 42.0, - }, - time.Unix(0, 0)), - } - testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), - testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestCharacterEncoding(t *testing.T) { @@ -558,7 +518,7 @@ func TestCharacterEncoding(t *testing.T) { ), } - watchMethod := defaultWatchMethod + watchMethod := "inotify" if runtime.GOOS == "windows" { watchMethod = "poll" } @@ -610,7 +570,7 @@ func TestCharacterEncoding(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { plugin := &Tail{ - Files: []string{filepath.Join(testdataDir, tt.testfiles)}, + Files: []string{filepath.Join("testdata", tt.testfiles)}, FromBeginning: tt.fromBeginning, MaxUndeliveredLines: 1000, Log: testutil.Logger{}, @@ -618,7 +578,7 @@ func TestCharacterEncoding(t *testing.T) { WatchMethod: watchMethod, } - plugin.SetParserFunc(NewInfluxParser) + plugin.SetParserFunc(newInfluxParser) require.NoError(t, plugin.Init()) if tt.offset != 0 { @@ -629,7 +589,9 @@ func TestCharacterEncoding(t *testing.T) { var acc testutil.Accumulator require.NoError(t, plugin.Start(&acc)) - acc.Wait(len(tt.expected)) + require.Eventuallyf(t, func() bool { + return acc.NMetrics() >= uint64(len(tt.expected)) + }, time.Second, 100*time.Millisecond, "Expected %d metrics found %d", len(tt.expected), acc.NMetrics()) plugin.Stop() actual := acc.GetTelegrafMetrics() @@ -645,22 +607,20 @@ func 
TestCharacterEncoding(t *testing.T) { func TestTailEOF(t *testing.T) { tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) + defer tmpfile.Close() defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\n") require.NoError(t, err) - err = tmpfile.Sync() - require.NoError(t, err) + require.NoError(t, tmpfile.Sync()) tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} - tt.SetParserFunc(NewInfluxParser) + tt.SetParserFunc(newInfluxParser) + require.NoError(t, tt.Init()) - err = tt.Init() - require.NoError(t, err) - - acc := testutil.Accumulator{} + var acc testutil.Accumulator require.NoError(t, tt.Start(&acc)) defer tt.Stop() require.NoError(t, acc.GatherError(tt.Gather)) @@ -668,8 +628,7 @@ func TestTailEOF(t *testing.T) { _, err = tmpfile.WriteString("cpu2 usage_idle=200\r\n") require.NoError(t, err) - err = tmpfile.Sync() - require.NoError(t, err) + require.NoError(t, tmpfile.Sync()) acc.Wait(2) require.NoError(t, acc.GatherError(tt.Gather)) @@ -681,15 +640,14 @@ func TestTailEOF(t *testing.T) { map[string]interface{}{ "usage_idle": float64(200), }) - - err = tmpfile.Close() - require.NoError(t, err) + require.NoError(t, tmpfile.Close()) } func TestCSVBehavior(t *testing.T) { // Prepare the input file input, err := os.CreateTemp("", "") require.NoError(t, err) + defer input.Close() defer os.Remove(input.Name()) // Write header _, err = input.WriteString("a,b\n") @@ -759,8 +717,6 @@ func TestCSVBehavior(t *testing.T) { require.NoError(t, input.Sync()) require.NoError(t, plugin.Gather(&acc)) require.Eventuallyf(t, func() bool { - acc.Lock() - defer acc.Unlock() return acc.NMetrics() >= uint64(len(expected)) }, time.Second, 100*time.Millisecond, "Expected %d metrics found %d", len(expected), acc.NMetrics()) @@ -776,12 +732,71 @@ func TestCSVBehavior(t *testing.T) { require.NoError(t, input.Close()) } -func getTestdataDir() string { - dir, err := os.Getwd() - if err != nil { - 
// if we cannot even establish the test directory, further progress is meaningless - panic(err) +func TestStatePersistence(t *testing.T) { + // Prepare the input file + lines := []string{ + "metric,tag=value foo=1i 1730478201000000000\n", + "metric,tag=value foo=2i 1730478211000000000\n", + "metric,tag=value foo=3i 1730478221000000000\n", + } + content := []byte(strings.Join(lines, "")) + + inputFilename := filepath.Join(t.TempDir(), "input.influx") + require.NoError(t, os.WriteFile(inputFilename, content, 0600)) + + // Define the metrics and state to skip the first metric + state := map[string]int64{inputFilename: int64(len(lines[0]))} + expectedState := map[string]int64{inputFilename: int64(len(content))} + expected := []telegraf.Metric{ + metric.New("metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"foo": 2}, + time.Unix(1730478211, 0), + ), + metric.New("metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"foo": 3}, + time.Unix(1730478221, 0), + ), } - return filepath.Join(dir, "testdata") + // Configure the plugin + plugin := &Tail{ + Files: []string{inputFilename}, + MaxUndeliveredLines: 1000, + offsets: make(map[string]int64, 0), + Log: testutil.Logger{}, + } + plugin.SetParserFunc(newInfluxParser) + require.NoError(t, plugin.Init()) + require.Empty(t, plugin.offsets) + + // Setup the "persisted" state + var pi telegraf.StatefulPlugin = plugin + require.NoError(t, pi.SetState(state)) + require.Len(t, plugin.offsets, 1) + + // Run the plugin + var acc testutil.Accumulator + require.NoError(t, plugin.Start(&acc)) + defer plugin.Stop() + + require.NoError(t, plugin.Gather(&acc)) + require.Eventuallyf(t, func() bool { + return acc.NMetrics() >= uint64(len(expected)) + }, time.Second, 100*time.Millisecond, "Expected %d metrics found %d", len(expected), acc.NMetrics()) + plugin.Stop() + + // Check the result + options := []cmp.Option{ + testutil.SortMetrics(), + testutil.IgnoreTime(), + } + actual := 
acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, expected, actual, options...) + + // Check getting the persisted state + actualState, ok := pi.GetState().(map[string]int64) + require.True(t, ok, "state is not a map[string]int64") + require.Equal(t, expectedState, actualState) } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 78ab8e25b6976..cb7d43c26678f 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -99,7 +99,7 @@ func (m *FakePerformanceQuery) ExpandWildCardPath(counterPath string) ([]string, if e, ok := m.expandPaths[counterPath]; ok { return e, nil } - return []string{}, fmt.Errorf("in ExpandWildCardPath: invalid counter path: %q", counterPath) + return nil, fmt.Errorf("in ExpandWildCardPath: invalid counter path: %q", counterPath) } func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle pdhCounterHandle) (float64, error) { diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 4edb0dfbb65ae..725756ae233d8 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -50,7 +50,7 @@ func TestEmptyListIntegration(t *testing.T) { }() winServices := &WinServices{ - ServiceNames: []string{}, + ServiceNames: make([]string, 0), } require.NoError(t, winServices.Init()) diff --git a/plugins/outputs/bigquery/bigquery_test.go b/plugins/outputs/bigquery/bigquery_test.go index 10ac6bc9a1c3a..24ea7f70564ee 100644 --- a/plugins/outputs/bigquery/bigquery_test.go +++ b/plugins/outputs/bigquery/bigquery_test.go @@ -271,19 +271,32 @@ func localBigQueryServer(t *testing.T) *httptest.Server { case "/projects/test-project/datasets/test-dataset/tables/test1/insertAll", 
"/projects/test-project/datasets/test-dataset/tables/test-metrics/insertAll": decoder := json.NewDecoder(r.Body) - require.NoError(t, decoder.Decode(&receivedBody)) + if err := decoder.Decode(&receivedBody); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } w.WriteHeader(http.StatusOK) - _, err := w.Write([]byte(successfulResponse)) - require.NoError(t, err) + if _, err := w.Write([]byte(successfulResponse)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } case "/projects/test-project/datasets/test-dataset/tables/test-metrics": w.WriteHeader(http.StatusOK) - _, err := w.Write([]byte("{}")) - require.NoError(t, err) + if _, err := w.Write([]byte("{}")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } default: w.WriteHeader(http.StatusNotFound) - _, err := w.Write([]byte(r.URL.String())) - require.NoError(t, err) + if _, err := w.Write([]byte(r.URL.String())); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } }) diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index 6516ec42fd375..4927524cfbd19 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -25,8 +25,11 @@ import ( func TestNilMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) - err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -50,8 +53,11 @@ func TestNilMetrics(t *testing.T) { func TestEmptyMetricsSlice(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ 
*http.Request) { w.WriteHeader(http.StatusOK) - err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -73,8 +79,11 @@ func TestEmptyMetricsSlice(t *testing.T) { func TestMockURL(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) - err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -131,9 +140,13 @@ func TestSendMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result bodyBytes, err := io.ReadAll(r.Body) - require.NoError(t, err) - bodyString := string(bodyBytes) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + bodyString := string(bodyBytes) lines := strings.Split(bodyString, "\n") sort.Strings(lines) @@ -143,10 +156,15 @@ func TestSendMetrics(t *testing.T) { foundString := strings.Join(lines, "\n") if foundString != expectedString { t.Errorf("Metric encoding failed. 
expected: %#v but got: %#v", expectedString, foundString) + return } + w.WriteHeader(http.StatusOK) - err = json.NewEncoder(w).Encode(fmt.Sprintf(`{"linesOk":%d,"linesInvalid":0,"error":null}`, len(lines))) - require.NoError(t, err) + if err = json.NewEncoder(w).Encode(fmt.Sprintf(`{"linesOk":%d,"linesInvalid":0,"error":null}`, len(lines))); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -217,7 +235,12 @@ func TestSendMetricsWithPatterns(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result bodyBytes, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + bodyString := string(bodyBytes) lines := strings.Split(bodyString, "\n") @@ -229,10 +252,15 @@ func TestSendMetricsWithPatterns(t *testing.T) { foundString := strings.Join(lines, "\n") if foundString != expectedString { t.Errorf("Metric encoding failed. 
expected: %#v but got: %#v", expectedString, foundString) + return } + w.WriteHeader(http.StatusOK) - err = json.NewEncoder(w).Encode(fmt.Sprintf(`{"linesOk":%d,"linesInvalid":0,"error":null}`, len(lines))) - require.NoError(t, err) + if err = json.NewEncoder(w).Encode(fmt.Sprintf(`{"linesOk":%d,"linesInvalid":0,"error":null}`, len(lines))); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -323,19 +351,55 @@ func TestSendSingleMetricWithUnorderedTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result bodyBytes, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + bodyString := string(bodyBytes) // use regex because dimension order isn't guaranteed - require.Len(t, bodyString, 94) - require.Regexp(t, regexp.MustCompile(`^mymeasurement\.myfield`), bodyString) - require.Regexp(t, regexp.MustCompile(`a=test`), bodyString) - require.Regexp(t, regexp.MustCompile(`b=test`), bodyString) - require.Regexp(t, regexp.MustCompile(`c=test`), bodyString) - require.Regexp(t, regexp.MustCompile(`dt.metrics.source=telegraf`), bodyString) - require.Regexp(t, regexp.MustCompile(`gauge,3.14 1289430000000$`), bodyString) + if len(bodyString) != 94 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'bodyString' should have %d item(s), but has %d", 94, len(bodyString)) + return + } + if regexp.MustCompile(`^mymeasurement\.myfield`).FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, `^mymeasurement\.myfield`) + return + } + if regexp.MustCompile(`a=test`).FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, `a=test`) + return + } + if 
regexp.MustCompile(`b=test`).FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, `a=test`) + return + } + if regexp.MustCompile(`c=test`).FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, `a=test`) + return + } + if regexp.MustCompile("dt.metrics.source=telegraf").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "dt.metrics.source=telegraf") + return + } + if regexp.MustCompile("gauge,3.14 1289430000000$").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "gauge,3.14 1289430000000$") + return + } w.WriteHeader(http.StatusOK) - err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -366,17 +430,27 @@ func TestSendSingleMetricWithUnorderedTags(t *testing.T) { func TestSendMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + bodyString := string(bodyBytes) expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000" if bodyString != expected { t.Errorf("Metric encoding failed. 
expected: %#v but got: %#v", expected, bodyString) + return + } + + w.WriteHeader(http.StatusOK) + if err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return } - err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) })) defer ts.Close() @@ -407,23 +481,58 @@ func TestSendMetricWithoutTags(t *testing.T) { func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } bodyString := string(bodyBytes) // use regex because dimension order isn't guaranteed - require.Len(t, bodyString, 100) - require.Regexp(t, regexp.MustCompile(`^mymeasurement\.myfield`), bodyString) - require.Regexp(t, regexp.MustCompile(`aaa=test`), bodyString) - require.Regexp(t, regexp.MustCompile(`b_b=test`), bodyString) - require.Regexp(t, regexp.MustCompile(`ccc=test`), bodyString) - require.Regexp(t, regexp.MustCompile(`dt.metrics.source=telegraf`), bodyString) - require.Regexp(t, regexp.MustCompile(`gauge,3.14 1289430000000$`), bodyString) - - err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if len(bodyString) != 100 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'bodyString' should have %d item(s), but has %d", 100, len(bodyString)) + return + } + if regexp.MustCompile(`^mymeasurement\.myfield`).FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, `^mymeasurement\.myfield`) + return + } + if regexp.MustCompile(`aaa=test`).FindStringIndex(bodyString) == nil { + 
w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, `aaa=test`) + return + } + if regexp.MustCompile(`b_b=test`).FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, `b_b=test`) + return + } + if regexp.MustCompile(`ccc=test`).FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, `ccc=test`) + return + } + if regexp.MustCompile("dt.metrics.source=telegraf").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "dt.metrics.source=telegraf") + return + } + if regexp.MustCompile("gauge,3.14 1289430000000$").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "gauge,3.14 1289430000000$") + return + } + + w.WriteHeader(http.StatusOK) + if err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -454,17 +563,37 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { func TestSendBooleanMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + bodyString := string(bodyBytes) // use regex because field order isn't guaranteed - require.Len(t, bodyString, 132) - require.Contains(t, bodyString, "mymeasurement.yes,dt.metrics.source=telegraf gauge,1 1289430000000") - require.Contains(t, bodyString, "mymeasurement.no,dt.metrics.source=telegraf gauge,0 1289430000000") - 
err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if len(bodyString) != 132 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'bodyString' should have %d item(s), but has %d", 132, len(bodyString)) + return + } + if !strings.Contains(bodyString, "mymeasurement.yes,dt.metrics.source=telegraf gauge,1 1289430000000") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'bodyString' should contain %q", "mymeasurement.yes,dt.metrics.source=telegraf gauge,1 1289430000000") + return + } + if !strings.Contains(bodyString, "mymeasurement.no,dt.metrics.source=telegraf gauge,0 1289430000000") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'bodyString' should contain %q", "mymeasurement.no,dt.metrics.source=telegraf gauge,0 1289430000000") + return + } + w.WriteHeader(http.StatusOK) + if err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -495,19 +624,47 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { func TestSendMetricWithDefaultDimensions(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + bodyString := string(bodyBytes) // use regex because field order isn't guaranteed - require.Len(t, bodyString, 78) - require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) - require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) - require.Regexp(t, regexp.MustCompile("dim=value"), bodyString) - require.Regexp(t, regexp.MustCompile("gauge,2 1289430000000$"), bodyString) - err = 
json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if len(bodyString) != 78 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'bodyString' should have %d item(s), but has %d", 78, len(bodyString)) + return + } + if regexp.MustCompile("^mymeasurement.value").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "^mymeasurement.value") + return + } + if regexp.MustCompile("dt.metrics.source=telegraf").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "dt.metrics.source=telegraf") + return + } + if regexp.MustCompile("dim=value").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "dim=metric") + return + } + if regexp.MustCompile("gauge,2 1289430000000$").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "gauge,2 1289430000000$") + return + } + w.WriteHeader(http.StatusOK) + if err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -538,19 +695,46 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { func TestMetricDimensionsOverrideDefault(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } bodyString := string(bodyBytes) // use regex because field order isn't guaranteed - require.Len(t, bodyString, 80) - require.Regexp(t, 
regexp.MustCompile("^mymeasurement.value"), bodyString) - require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) - require.Regexp(t, regexp.MustCompile("dim=metric"), bodyString) - require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) - err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if len(bodyString) != 80 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'bodyString' should have %d item(s), but has %d", 80, len(bodyString)) + return + } + if regexp.MustCompile("^mymeasurement.value").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "^mymeasurement.value") + return + } + if regexp.MustCompile("dt.metrics.source=telegraf").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "dt.metrics.source=telegraf") + return + } + if regexp.MustCompile("dim=metric").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "dim=metric") + return + } + if regexp.MustCompile("gauge,32 1289430000000$").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "gauge,32 1289430000000$") + return + } + w.WriteHeader(http.StatusOK) + if err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() @@ -581,18 +765,41 @@ func TestMetricDimensionsOverrideDefault(t *testing.T) { func TestStaticDimensionsOverrideMetric(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := 
io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } bodyString := string(bodyBytes) // use regex because field order isn't guaranteed - require.Len(t, bodyString, 53) - require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) - require.Regexp(t, regexp.MustCompile("dim=static"), bodyString) - require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) - err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) + if len(bodyString) != 53 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'bodyString' should have %d item(s), but has %d", 53, len(bodyString)) + return + } + if regexp.MustCompile("^mymeasurement.value").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "^mymeasurement.value") + return + } + if regexp.MustCompile("dim=static").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "dim=static") + return + } + if regexp.MustCompile("gauge,32 1289430000000$").FindStringIndex(bodyString) == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Expect \"%v\" to match \"%v\"", bodyString, "gauge,32 1289430000000$") + return + } + w.WriteHeader(http.StatusOK) + if err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer ts.Close() diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go index e13f7dc1295af..5cfc477f0d6b4 100644 --- a/plugins/outputs/elasticsearch/elasticsearch_test.go +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -679,14 +679,27 @@ func TestRequestHeaderWhenGzipIsEnabled(t *testing.T) { 
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/_bulk": - require.Equal(t, "gzip", r.Header.Get("Content-Encoding")) - require.Equal(t, "gzip", r.Header.Get("Accept-Encoding")) - _, err := w.Write([]byte("{}")) - require.NoError(t, err) + if contentHeader := r.Header.Get("Content-Encoding"); contentHeader != "gzip" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "gzip", contentHeader) + return + } + if acceptHeader := r.Header.Get("Accept-Encoding"); acceptHeader != "gzip" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "gzip", acceptHeader) + return + } + + if _, err := w.Write([]byte("{}")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return default: - _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) - require.NoError(t, err) + if _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return } })) @@ -714,13 +727,21 @@ func TestRequestHeaderWhenGzipIsDisabled(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/_bulk": - require.NotEqual(t, "gzip", r.Header.Get("Content-Encoding")) - _, err := w.Write([]byte("{}")) - require.NoError(t, err) + if contentHeader := r.Header.Get("Content-Encoding"); contentHeader == "gzip" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "gzip", contentHeader) + return + } + if _, err := w.Write([]byte("{}")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return default: - _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) - require.NoError(t, err) + if _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)); err != nil { + 
w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return } })) @@ -748,13 +769,21 @@ func TestAuthorizationHeaderWhenBearerTokenIsPresent(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/_bulk": - require.Equal(t, "Bearer 0123456789abcdef", r.Header.Get("Authorization")) - _, err := w.Write([]byte("{}")) - require.NoError(t, err) + if authHeader := r.Header.Get("Authorization"); authHeader != "Bearer 0123456789abcdef" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Bearer 0123456789abcdef", authHeader) + return + } + if _, err := w.Write([]byte("{}")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return default: - _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) - require.NoError(t, err) + if _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return } })) diff --git a/plugins/outputs/groundwork/groundwork_test.go b/plugins/outputs/groundwork/groundwork_test.go index de073f7b6d86b..a26a4d2f2441a 100644 --- a/plugins/outputs/groundwork/groundwork_test.go +++ b/plugins/outputs/groundwork/groundwork_test.go @@ -34,21 +34,39 @@ func TestWriteWithDebug(t *testing.T) { // Simulate Groundwork server that should receive custom metrics server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Decode body to use in assertions below var obj transit.ResourcesWithServicesRequest - err = json.Unmarshal(body, &obj) - require.NoError(t, err) + if err = json.Unmarshal(body, &obj); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Check if server gets proper data - 
require.Equal(t, "IntMetric", obj.Resources[0].Services[0].Name) - require.Equal(t, int64(42), *obj.Resources[0].Services[0].Metrics[0].Value.IntegerValue) + if obj.Resources[0].Services[0].Name != "IntMetric" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "IntMetric", obj.Resources[0].Services[0].Name) + return + } + if *obj.Resources[0].Services[0].Metrics[0].Value.IntegerValue != int64(42) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %v, actual: %v", int64(42), *obj.Resources[0].Services[0].Metrics[0].Value.IntegerValue) + return + } // Send back details ans := "Content-type: application/json\n\n" + `{"message":"` + srvTok + `"}` - _, err = fmt.Fprintln(w, ans) - require.NoError(t, err) + if _, err = fmt.Fprintln(w, ans); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) i := Groundwork{ @@ -84,24 +102,62 @@ func TestWriteWithDefaults(t *testing.T) { // Simulate Groundwork server that should receive custom metrics server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Decode body to use in assertions below var obj transit.ResourcesWithServicesRequest - err = json.Unmarshal(body, &obj) - require.NoError(t, err) + if err = json.Unmarshal(body, &obj); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Check if server gets proper data - require.Equal(t, defaultTestAgentID, obj.Context.AgentID) - require.Equal(t, customAppType, obj.Context.AppType) - require.Equal(t, defaultHost, obj.Resources[0].Name) - require.Equal(t, transit.MonitorStatus("SERVICE_OK"), obj.Resources[0].Services[0].Status) - require.Equal(t, "IntMetric", obj.Resources[0].Services[0].Name) - require.Equal(t, int64(42), 
*obj.Resources[0].Services[0].Metrics[0].Value.IntegerValue) - require.Empty(t, obj.Groups) - - _, err = fmt.Fprintln(w, "OK") - require.NoError(t, err) + if obj.Context.AgentID != defaultTestAgentID { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", defaultTestAgentID, obj.Context.AgentID) + return + } + if obj.Context.AppType != customAppType { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", customAppType, obj.Context.AppType) + return + } + if obj.Resources[0].Name != defaultHost { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", defaultHost, obj.Resources[0].Name) + return + } + if obj.Resources[0].Services[0].Status != transit.MonitorStatus("SERVICE_OK") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", transit.MonitorStatus("SERVICE_OK"), obj.Resources[0].Services[0].Status) + return + } + if obj.Resources[0].Services[0].Name != "IntMetric" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "IntMetric", obj.Resources[0].Services[0].Name) + return + } + if *obj.Resources[0].Services[0].Metrics[0].Value.IntegerValue != int64(42) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %v, actual: %v", int64(42), *obj.Resources[0].Services[0].Metrics[0].Value.IntegerValue) + return + } + if len(obj.Groups) != 0 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'obj.Groups' should not be empty") + return + } + + if _, err = fmt.Fprintln(w, "OK"); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) i := Groundwork{ @@ -136,22 +192,55 @@ func TestWriteWithFields(t *testing.T) { // Simulate Groundwork server that should receive custom metrics server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, err := 
io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Decode body to use in assertions below var obj transit.ResourcesWithServicesRequest - err = json.Unmarshal(body, &obj) - require.NoError(t, err) + if err = json.Unmarshal(body, &obj); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Check if server gets proper data - require.Equal(t, "Test Message", obj.Resources[0].Services[0].LastPluginOutput) - require.Equal(t, transit.MonitorStatus("SERVICE_WARNING"), obj.Resources[0].Services[0].Status) - require.InDelta(t, float64(1.0), *obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue, testutil.DefaultDelta) - require.InDelta(t, float64(3.0), *obj.Resources[0].Services[0].Metrics[0].Thresholds[0].Value.DoubleValue, testutil.DefaultDelta) - require.InDelta(t, float64(2.0), *obj.Resources[0].Services[0].Metrics[0].Thresholds[1].Value.DoubleValue, testutil.DefaultDelta) - - _, err = fmt.Fprintln(w, "OK") - require.NoError(t, err) + if obj.Resources[0].Services[0].LastPluginOutput != "Test Message" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Test Message", obj.Resources[0].Services[0].LastPluginOutput) + return + } + if obj.Resources[0].Services[0].Status != transit.MonitorStatus("SERVICE_WARNING") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", transit.MonitorStatus("SERVICE_WARNING"), obj.Resources[0].Services[0].Status) + return + } + if dt := float64(1.0) - *obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue; !testutil.WithinDefaultDelta(dt) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Max difference between %v and %v allowed is %v, but difference was %v", + float64(1.0), *obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue, testutil.DefaultDelta, dt) + return + } + if dt := float64(3.0) - 
*obj.Resources[0].Services[0].Metrics[0].Thresholds[0].Value.DoubleValue; !testutil.WithinDefaultDelta(dt) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Max difference between %v and %v allowed is %v, but difference was %v", + float64(3.0), *obj.Resources[0].Services[0].Metrics[0].Thresholds[0].Value.DoubleValue, testutil.DefaultDelta, dt) + return + } + if dt := float64(2.0) - *obj.Resources[0].Services[0].Metrics[0].Thresholds[1].Value.DoubleValue; !testutil.WithinDefaultDelta(dt) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Max difference between %v and %v allowed is %v, but difference was %v", + float64(2.0), *obj.Resources[0].Services[0].Metrics[0].Thresholds[1].Value.DoubleValue, testutil.DefaultDelta, dt) + return + } + + if _, err = fmt.Fprintln(w, "OK"); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) i := Groundwork{ @@ -197,30 +286,95 @@ func TestWriteWithTags(t *testing.T) { // Simulate Groundwork server that should receive custom metrics server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Decode body to use in assertions below var obj transit.ResourcesWithServicesRequest - err = json.Unmarshal(body, &obj) - require.NoError(t, err) + if err = json.Unmarshal(body, &obj); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Check if server gets proper data - require.Equal(t, defaultTestAgentID, obj.Context.AgentID) - require.Equal(t, defaultAppType, obj.Context.AppType) - require.Equal(t, "Host01", obj.Resources[0].Name) - require.Equal(t, "Service01", obj.Resources[0].Services[0].Name) - require.Equal(t, "FACILITY", *obj.Resources[0].Services[0].Properties["facility"].StringValue) - require.Equal(t, "SEVERITY", 
*obj.Resources[0].Services[0].Properties["severity"].StringValue) - require.Equal(t, "Group01", obj.Groups[0].GroupName) - require.Equal(t, "Host01", obj.Groups[0].Resources[0].Name) - require.Equal(t, "Test Tag", obj.Resources[0].Services[0].LastPluginOutput) - require.Equal(t, transit.MonitorStatus("SERVICE_PENDING"), obj.Resources[0].Services[0].Status) - require.InDelta(t, float64(1.0), *obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue, testutil.DefaultDelta) - require.InDelta(t, float64(9.0), *obj.Resources[0].Services[0].Metrics[0].Thresholds[0].Value.DoubleValue, testutil.DefaultDelta) - require.InDelta(t, float64(6.0), *obj.Resources[0].Services[0].Metrics[0].Thresholds[1].Value.DoubleValue, testutil.DefaultDelta) - - _, err = fmt.Fprintln(w, "OK") - require.NoError(t, err) + if obj.Context.AgentID != defaultTestAgentID { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", defaultTestAgentID, obj.Context.AgentID) + return + } + if obj.Context.AppType != defaultAppType { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", defaultAppType, obj.Context.AppType) + return + } + if obj.Resources[0].Name != "Host01" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Host01", obj.Resources[0].Name) + return + } + if obj.Resources[0].Services[0].Name != "Service01" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Service01", obj.Resources[0].Services[0].Name) + return + } + if *obj.Resources[0].Services[0].Properties["facility"].StringValue != "FACILITY" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "FACILITY", *obj.Resources[0].Services[0].Properties["facility"].StringValue) + return + } + if *obj.Resources[0].Services[0].Properties["severity"].StringValue != "SEVERITY" { + 
w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "SEVERITY", *obj.Resources[0].Services[0].Properties["severity"].StringValue) + return + } + if obj.Groups[0].GroupName != "Group01" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Group01", obj.Groups[0].GroupName) + return + } + if obj.Groups[0].Resources[0].Name != "Host01" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Host01", obj.Groups[0].Resources[0].Name) + return + } + if obj.Resources[0].Services[0].LastPluginOutput != "Test Tag" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Test Tag", obj.Resources[0].Services[0].LastPluginOutput) + return + } + if obj.Resources[0].Services[0].Status != transit.MonitorStatus("SERVICE_PENDING") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", transit.MonitorStatus("SERVICE_PENDING"), obj.Resources[0].Services[0].Status) + return + } + if dt := float64(1.0) - *obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue; !testutil.WithinDefaultDelta(dt) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Max difference between %v and %v allowed is %v, but difference was %v", + float64(1.0), *obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue, testutil.DefaultDelta, dt) + return + } + if dt := float64(9.0) - *obj.Resources[0].Services[0].Metrics[0].Thresholds[0].Value.DoubleValue; !testutil.WithinDefaultDelta(dt) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Max difference between %v and %v allowed is %v, but difference was %v", + float64(9.0), *obj.Resources[0].Services[0].Metrics[0].Thresholds[0].Value.DoubleValue, testutil.DefaultDelta, dt) + return + } + if dt := float64(6.0) - *obj.Resources[0].Services[0].Metrics[0].Thresholds[1].Value.DoubleValue; !testutil.WithinDefaultDelta(dt) { + 
w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Max difference between %v and %v allowed is %v, but difference was %v", + float64(6.0), *obj.Resources[0].Services[0].Metrics[0].Thresholds[1].Value.DoubleValue, testutil.DefaultDelta, dt) + return + } + + if _, err = fmt.Fprintln(w, "OK"); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) i := Groundwork{ diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index b2a81cc2777d2..52ca1cd29a7b7 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -8,6 +8,7 @@ import ( "net/http/httptest" "net/url" "os" + "strings" "testing" "time" @@ -108,7 +109,11 @@ func TestMethod(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, tt.expectedMethod, r.Method) + if r.Method != tt.expectedMethod { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.expectedMethod, r.Method) + return + } w.WriteHeader(http.StatusOK) }) @@ -316,7 +321,11 @@ func TestContentType(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, tt.expected, r.Header.Get("Content-Type")) + if contentHeader := r.Header.Get("Content-Type"); contentHeader != tt.expected { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.expected, contentHeader) + return + } w.WriteHeader(http.StatusOK) }) @@ -365,18 +374,34 @@ func TestContentEncodingGzip(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, tt.expected, r.Header.Get("Content-Encoding")) + if contentHeader := 
r.Header.Get("Content-Encoding"); contentHeader != tt.expected { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.expected, contentHeader) + return + } body := r.Body var err error if r.Header.Get("Content-Encoding") == "gzip" { body, err = gzip.NewReader(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } payload, err := io.ReadAll(body) - require.NoError(t, err) - require.Contains(t, string(payload), "cpu value=42") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if !strings.Contains(string(payload), "cpu value=42") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'payload' should contain %q", "cpu value=42") + return + } w.WriteHeader(http.StatusNoContent) }) @@ -432,8 +457,16 @@ func TestBasicAuth(t *testing.T) { } ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ := r.BasicAuth() - require.Equal(t, tt.username, username) - require.Equal(t, tt.password, password) + if username != tt.username { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.username, username) + return + } + if password != tt.password { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.password, password) + return + } w.WriteHeader(http.StatusOK) }) @@ -660,7 +693,11 @@ func TestDefaultUserAgent(t *testing.T) { t.Run("default-user-agent", func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) + if userHeader := r.Header.Get("User-Agent"); userHeader != internal.ProductToken() { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", internal.ProductToken(), userHeader) + return + } 
w.WriteHeader(http.StatusOK) }) diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 8f6708cecd2b4..9be03dce2f9ef 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -12,6 +12,8 @@ import ( "net/http/httptest" "net/url" "path" + "reflect" + "strings" "testing" "time" @@ -581,14 +583,30 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/write": - require.Equal(t, "gzip", r.Header.Get("Content-Encoding")) + if contentHeader := r.Header.Get("Content-Encoding"); contentHeader != "gzip" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "gzip", contentHeader) + return + } gr, err := gzip.NewReader(r.Body) - require.NoError(t, err) - body, err := io.ReadAll(gr) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } - require.Contains(t, string(body), "cpu value=42") + body, err := io.ReadAll(gr) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if !strings.Contains(string(body), "cpu value=42") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'body' should contain %q", "cpu value=42") + return + } w.WriteHeader(http.StatusNoContent) return default: @@ -707,13 +725,28 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/write": - err := r.ParseForm() - require.NoError(t, err) - require.Equal(t, []string{"foo"}, r.Form["db"]) + if err := r.ParseForm(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if !reflect.DeepEqual(r.Form["db"], []string{"foo"}) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", []string{"foo"}, 
r.Form["db"]) + return + } body, err := io.ReadAll(r.Body) - require.NoError(t, err) - require.Contains(t, string(body), "cpu value=42") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if !strings.Contains(string(body), "cpu value=42") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'body' should contain %q", "cpu value=42") + return + } w.WriteHeader(http.StatusNoContent) return @@ -1024,8 +1057,11 @@ func TestDBRPTagsCreateDatabaseNotCalledOnRetryAfterForbidden(t *testing.T) { return } w.WriteHeader(http.StatusForbidden) - _, err = w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } default: w.WriteHeader(http.StatusInternalServerError) } @@ -1097,8 +1133,11 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { return } w.WriteHeader(http.StatusForbidden) - _, err = w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } default: w.WriteHeader(http.StatusInternalServerError) } @@ -1107,8 +1146,11 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { switch r.URL.Path { case "/write": w.WriteHeader(http.StatusNotFound) - _, err = w.Write([]byte(`{"error": "database not found: \"telegraf\""}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{"error": "database not found: \"telegraf\""}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } default: w.WriteHeader(http.StatusInternalServerError) } @@ -1182,8 +1224,11 @@ func TestDBNotFoundShouldDropMetricWhenSkipDatabaseCreateIsTrue(t 
*testing.T) { switch r.URL.Path { case "/write": w.WriteHeader(http.StatusNotFound) - _, err = w.Write([]byte(`{"error": "database not found: \"telegraf\""}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{"error": "database not found: \"telegraf\""}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } default: w.WriteHeader(http.StatusInternalServerError) } diff --git a/plugins/outputs/influxdb_v2/influxdb_v2_test.go b/plugins/outputs/influxdb_v2/influxdb_v2_test.go index af9fed082ba69..36c3c3b08e0d9 100644 --- a/plugins/outputs/influxdb_v2/influxdb_v2_test.go +++ b/plugins/outputs/influxdb_v2/influxdb_v2_test.go @@ -5,9 +5,13 @@ import ( "net" "net/http" "net/http/httptest" + "reflect" + "strings" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" @@ -15,7 +19,6 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSampleConfig(t *testing.T) { @@ -138,12 +141,28 @@ func TestWrite(t *testing.T) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/api/v2/write": - require.NoError(t, r.ParseForm()) - require.Equal(t, []string{"foobar"}, r.Form["bucket"]) + if err := r.ParseForm(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if !reflect.DeepEqual(r.Form["bucket"], []string{"foobar"}) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", []string{"foobar"}, r.Form["bucket"]) + return + } body, err := io.ReadAll(r.Body) - require.NoError(t, err) - require.Contains(t, string(body), "cpu value=42.123") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if 
!strings.Contains(string(body), "cpu value=42.123") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'body' should contain %q", "cpu value=42.123") + return + } w.WriteHeader(http.StatusNoContent) return @@ -193,12 +212,28 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/api/v2/write": - require.NoError(t, r.ParseForm()) - require.Equal(t, []string{"foo"}, r.Form["bucket"]) + if err := r.ParseForm(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if !reflect.DeepEqual(r.Form["bucket"], []string{"foo"}) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", []string{"foo"}, r.Form["bucket"]) + return + } body, err := io.ReadAll(r.Body) - require.NoError(t, err) - require.Contains(t, string(body), "cpu value=42") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if !strings.Contains(string(body), "cpu value=42") { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'body' should contain %q", "cpu value=42") + return + } w.WriteHeader(http.StatusNoContent) return @@ -246,10 +281,18 @@ func TestTooLargeWriteRetry(t *testing.T) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/api/v2/write": - require.NoError(t, r.ParseForm()) + if err := r.ParseForm(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } body, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } // Ensure metric body size is small if len(body) > 16 { diff --git a/plugins/outputs/logzio/logzio_test.go b/plugins/outputs/logzio/logzio_test.go index 701d92a0e2b32..9285912548f56 100644 --- a/plugins/outputs/logzio/logzio_test.go +++ b/plugins/outputs/logzio/logzio_test.go @@ -8,6 +8,7 @@ 
import ( "io" "net/http" "net/http/httptest" + "reflect" "testing" "github.com/stretchr/testify/require" @@ -72,24 +73,54 @@ func TestWrite(t *testing.T) { var body bytes.Buffer ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { gz, err := gzip.NewReader(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } var maxDecompressionSize int64 = 500 * 1024 * 1024 n, err := io.CopyN(&body, gz, maxDecompressionSize) if errors.Is(err, io.EOF) { err = nil } - require.NoError(t, err) - require.NotEqualf(t, n, maxDecompressionSize, "size of decoded data exceeds allowed size %d", maxDecompressionSize) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if n > maxDecompressionSize { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Size of decoded data exceeds (%v) allowed size (%v)", n, maxDecompressionSize) + return + } var lm Metric - err = json.Unmarshal(body.Bytes(), &lm) - require.NoError(t, err) - - require.Equal(t, tm.Fields(), lm.Metric[tm.Name()]) - require.Equal(t, logzioType, lm.Type) - require.Equal(t, tm.Tags(), lm.Dimensions) - require.Equal(t, tm.Time(), lm.Time) + if err = json.Unmarshal(body.Bytes(), &lm); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if !reflect.DeepEqual(lm.Metric[tm.Name()], tm.Fields()) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tm.Fields(), lm.Metric[tm.Name()]) + return + } + if lm.Type != logzioType { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", logzioType, lm.Type) + return + } + if !reflect.DeepEqual(lm.Dimensions, tm.Tags()) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tm.Tags(), lm.Dimensions) + return + } + if lm.Time != tm.Time() { + 
w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tm.Time(), lm.Time) + return + } w.WriteHeader(http.StatusOK) })) diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go index 49a96914ed784..a812a8b09a1a6 100644 --- a/plugins/outputs/loki/loki_test.go +++ b/plugins/outputs/loki/loki_test.go @@ -7,6 +7,8 @@ import ( "net/http" "net/http/httptest" "net/url" + "reflect" + "strings" "testing" "time" @@ -161,7 +163,11 @@ func TestContentType(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, tt.expected, r.Header.Get("Content-Type")) + if contentHeader := r.Header.Get("Content-Type"); contentHeader != tt.expected { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.expected, contentHeader) + return + } w.WriteHeader(http.StatusOK) }) @@ -205,28 +211,71 @@ func TestContentEncodingGzip(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, tt.expected, r.Header.Get("Content-Encoding")) + if contentHeader := r.Header.Get("Content-Encoding"); contentHeader != tt.expected { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.expected, contentHeader) + return + } body := r.Body var err error if r.Header.Get("Content-Encoding") == "gzip" { body, err = gzip.NewReader(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } payload, err := io.ReadAll(body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } var s Request - err = json.Unmarshal(payload, &s) - require.NoError(t, err) - require.Len(t, s.Streams, 1) - 
require.Len(t, s.Streams[0].Logs, 1) - require.Len(t, s.Streams[0].Logs[0], 2) - require.Equal(t, map[string]string{"key1": "value1"}, s.Streams[0].Labels) - require.Equal(t, "123000000000", s.Streams[0].Logs[0][0]) - require.Contains(t, s.Streams[0].Logs[0][1], "line=\"my log\"") - require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") + if err = json.Unmarshal(payload, &s); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if len(s.Streams) != 1 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams' should have %d item(s), but has %d", 1, len(s.Streams)) + return + } + if len(s.Streams[0].Logs) != 1 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs' should have %d item(s), but has %d", 1, len(s.Streams[0].Logs)) + return + } + if len(s.Streams[0].Logs[0]) != 2 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs[0]' should have %d item(s), but has %d", 2, len(s.Streams[0].Logs[0])) + return + } + if !reflect.DeepEqual(s.Streams[0].Labels, map[string]string{"key1": "value1"}) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", map[string]string{"key1": "value1"}, s.Streams[0].Labels) + return + } + if s.Streams[0].Logs[0][0] != "123000000000" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "123000000000", s.Streams[0].Logs[0][0]) + return + } + if !strings.Contains(s.Streams[0].Logs[0][1], `line="my log"`) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs[0][1]' should contain %q", `line="my log"`) + return + } + if !strings.Contains(s.Streams[0].Logs[0][1], `field="3.14"`) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs[0][1]' should contain %q", `field="3.14"`) + return + } w.WriteHeader(http.StatusNoContent) }) @@ -264,16 +313,32 @@ func TestMetricNameLabel(t *testing.T) { 
t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { payload, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } var s Request - require.NoError(t, json.Unmarshal(payload, &s)) + if err := json.Unmarshal(payload, &s); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } switch tt.metricNameLabel { case "": - require.Equal(t, map[string]string{"key1": "value1"}, s.Streams[0].Labels) + if !reflect.DeepEqual(s.Streams[0].Labels, map[string]string{"key1": "value1"}) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", map[string]string{"key1": "value1"}, s.Streams[0].Labels) + return + } case "foobar": - require.Equal(t, map[string]string{"foobar": "log", "key1": "value1"}, s.Streams[0].Labels) + if !reflect.DeepEqual(s.Streams[0].Labels, map[string]string{"foobar": "log", "key1": "value1"}) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", map[string]string{"foobar": "log", "key1": "value1"}, s.Streams[0].Labels) + return + } } w.WriteHeader(http.StatusNoContent) @@ -315,8 +380,16 @@ func TestBasicAuth(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ := r.BasicAuth() - require.Equal(t, tt.username, username) - require.Equal(t, tt.password, password) + if username != tt.username { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.username, username) + return + } + if password != tt.password { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.password, password) + return + } w.WriteHeader(http.StatusOK) }) @@ -412,7 +485,11 @@ func TestDefaultUserAgent(t *testing.T) { 
t.Run("default-user-agent", func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) + if userHeader := r.Header.Get("User-Agent"); userHeader != internal.ProductToken() { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", internal.ProductToken(), userHeader) + return + } w.WriteHeader(http.StatusOK) }) @@ -440,21 +517,68 @@ func TestMetricSorting(t *testing.T) { var err error payload, err := io.ReadAll(body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } var s Request - err = json.Unmarshal(payload, &s) - require.NoError(t, err) - require.Len(t, s.Streams, 1) - require.Len(t, s.Streams[0].Logs, 2) - require.Len(t, s.Streams[0].Logs[0], 2) - require.Equal(t, map[string]string{"key1": "value1"}, s.Streams[0].Labels) - require.Equal(t, "456000000000", s.Streams[0].Logs[0][0]) - require.Contains(t, s.Streams[0].Logs[0][1], "line=\"older log\"") - require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") - require.Equal(t, "1230000000000", s.Streams[0].Logs[1][0]) - require.Contains(t, s.Streams[0].Logs[1][1], "line=\"newer log\"") - require.Contains(t, s.Streams[0].Logs[1][1], "field=\"3.14\"") + if err = json.Unmarshal(payload, &s); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if len(s.Streams) != 1 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams' should have %d item(s), but has %d", 1, len(s.Streams)) + return + } + if len(s.Streams[0].Logs) != 2 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs' should have %d item(s), but has %d", 2, len(s.Streams[0].Logs)) + return + } + if len(s.Streams[0].Logs[0]) != 2 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs[0]' should have %d item(s), but has 
%d", 2, len(s.Streams[0].Logs[0])) + return + } + if !reflect.DeepEqual(s.Streams[0].Labels, map[string]string{"key1": "value1"}) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", map[string]string{"key1": "value1"}, s.Streams[0].Labels) + return + } + if s.Streams[0].Logs[0][0] != "456000000000" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "456000000000", s.Streams[0].Logs[0][0]) + return + } + if !strings.Contains(s.Streams[0].Logs[0][1], `line="older log"`) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs[0][1]' should contain %q", `line="older log"`) + return + } + if !strings.Contains(s.Streams[0].Logs[0][1], `field="3.14"`) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs[0][1]' should contain %q", `field="3.14"`) + return + } + if s.Streams[0].Logs[1][0] != "1230000000000" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "1230000000000", s.Streams[0].Logs[1][0]) + return + } + if !strings.Contains(s.Streams[0].Logs[1][1], `line="newer log"`) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs[1][1]' should contain %q", `line="newer log"`) + return + } + if !strings.Contains(s.Streams[0].Logs[1][1], `field="3.14"`) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'s.Streams[0].Logs[1][1]' should contain %q", `field="3.14"`) + return + } w.WriteHeader(http.StatusNoContent) }) diff --git a/plugins/outputs/nebius_cloud_monitoring/nebius_cloud_monitoring_test.go b/plugins/outputs/nebius_cloud_monitoring/nebius_cloud_monitoring_test.go index 51c4f310f1bc5..c16cd59278a34 100644 --- a/plugins/outputs/nebius_cloud_monitoring/nebius_cloud_monitoring_test.go +++ b/plugins/outputs/nebius_cloud_monitoring/nebius_cloud_monitoring_test.go @@ -31,11 +31,17 @@ func TestWrite(t *testing.T) { ExpiresIn: 123, } 
w.Header().Set("Content-Type", "application/json; charset=utf-8") - err := json.NewEncoder(w).Encode(token) - require.NoError(t, err) + if err := json.NewEncoder(w).Encode(token); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } else if strings.HasSuffix(r.URL.Path, "/folder") { - _, err := io.WriteString(w, "folder1") - require.NoError(t, err) + if _, err := io.WriteString(w, "folder1"); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } w.WriteHeader(http.StatusOK) }), diff --git a/plugins/outputs/opensearch/opensearch_test.go b/plugins/outputs/opensearch/opensearch_test.go index bdde22a979ec8..ad83178ab9645 100644 --- a/plugins/outputs/opensearch/opensearch_test.go +++ b/plugins/outputs/opensearch/opensearch_test.go @@ -148,14 +148,26 @@ func TestRequestHeaderWhenGzipIsEnabled(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/_bulk": - require.Equal(t, "gzip", r.Header.Get("Content-Encoding")) - require.Equal(t, "gzip", r.Header.Get("Accept-Encoding")) - _, err := w.Write([]byte("{}")) - require.NoError(t, err) + if contentHeader := r.Header.Get("Content-Encoding"); contentHeader != "gzip" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "gzip", contentHeader) + return + } + if acceptHeader := r.Header.Get("Accept-Encoding"); acceptHeader != "gzip" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "gzip", acceptHeader) + return + } + if _, err := w.Write([]byte("{}")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return default: - _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) - require.NoError(t, err) + if _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return } 
})) @@ -188,13 +200,21 @@ func TestRequestHeaderWhenGzipIsDisabled(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/_bulk": - require.NotEqual(t, "gzip", r.Header.Get("Content-Encoding")) - _, err := w.Write([]byte("{}")) - require.NoError(t, err) + if contentHeader := r.Header.Get("Content-Encoding"); contentHeader == "gzip" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "gzip", contentHeader) + return + } + if _, err := w.Write([]byte("{}")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return default: - _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) - require.NoError(t, err) + if _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return } })) @@ -224,13 +244,21 @@ func TestAuthorizationHeaderWhenBearerTokenIsPresent(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/_bulk": - require.Equal(t, "Bearer 0123456789abcdef", r.Header.Get("Authorization")) - _, err := w.Write([]byte("{}")) - require.NoError(t, err) + if authHeader := r.Header.Get("Authorization"); authHeader != "Bearer 0123456789abcdef" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Bearer 0123456789abcdef", authHeader) + return + } + if _, err := w.Write([]byte("{}")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return default: - _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) - require.NoError(t, err) + if _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return } })) @@ -284,13 +312,21 @@ func TestDisconnectedServerOnWrite(t *testing.T) { ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/_bulk": - require.Equal(t, "Bearer 0123456789abcdef", r.Header.Get("Authorization")) - _, err := w.Write([]byte("{}")) - require.NoError(t, err) - return + if authHeader := r.Header.Get("Authorization"); authHeader != "Bearer 0123456789abcdef" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "Bearer 0123456789abcdef", authHeader) + return + } + if _, err := w.Write([]byte("{}")); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } default: - _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) - require.NoError(t, err) + if _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + } return } })) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index 7a0c628e0733f..2527042752a8c 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -132,6 +132,12 @@ to use them. ## tag IDs. Each entry consumes approximately 34 bytes of memory. # tag_cache_size = 100000 + ## Cut column names at the given length to not exceed PostgreSQL's + ## 'identifier length' limit (default: no limit) + ## (see https://www.postgresql.org/docs/current/limits.html) + ## Be careful to not create duplicate column names! + # column_name_length_limit = 0 + ## Enable & set the log level for the Postgres driver. # log_level = "warn" # trace, debug, info, warn, error, none ``` @@ -197,6 +203,19 @@ Documentation on how to write templates can be found [sqltemplate docs][1] [1]: https://pkg.go.dev/github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate +## Long Column Names + +Postgres imposes a limit on the length of column identifiers, which can be found +in the [official docs](https://www.postgresql.org/docs/current/limits.html). 
By +default Telegraf does not enforce this limit as this limit can be modified on +the server side. Furthermore, cutting off column names could lead to collisions +if the columns are only different after the cut-off. + +> [!WARNING] +> Make sure you will not cause column name collisions when setting +> `column_name_length_limit`! If in doubt, explicitly shorten the field and tag +> names using e.g. the regexp processor. + ### Samples #### TimescaleDB diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 0e93b061e95d0..2f1a8164aa9d4 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -51,6 +51,7 @@ type Postgresql struct { Uint64Type string `toml:"uint64_type"` RetryMaxBackoff config.Duration `toml:"retry_max_backoff"` TagCacheSize int `toml:"tag_cache_size"` + ColumnNameLenLimit int `toml:"column_name_length_limit"` LogLevel string `toml:"log_level"` Logger telegraf.Logger `toml:"-"` diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 846f5c27ff5d5..ad9afee456e04 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -976,3 +976,239 @@ func TestStressConcurrencyIntegration(t *testing.T) { } } } + +func TestLongColumnNamesErrorIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + // Setup the plugin + p, err := newPostgresqlTest(t) + require.NoError(t, err) + require.NoError(t, p.Init()) + require.NoError(t, p.Connect()) + + // Define the metric to send + metrics := []telegraf.Metric{ + metric.New( + t.Name(), + map[string]string{}, + map[string]interface{}{ + "a_field_with_a_some_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63": int64(0), + "value": 42, + }, + time.Unix(0, 0).UTC(), + ), + metric.New( + t.Name(), + map[string]string{}, + map[string]interface{}{ + 
"a_field_with_a_some_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63": int64(1), + "value": 43, + }, + time.Unix(0, 1).UTC(), + ), + metric.New( + t.Name(), + map[string]string{}, + map[string]interface{}{ + "a_field_with_a_some_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63": int64(2), + "value": 44, + }, + time.Unix(0, 2).UTC(), + ), + metric.New( + t.Name(), + map[string]string{}, + map[string]interface{}{ + "a_field_with_another_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63": int64(99), + "value": 45, + }, + time.Unix(0, 9).UTC(), + ), + } + require.NoError(t, p.Write(metrics)) + require.NoError(t, p.Write(metrics)) + + // Check if the logging is restricted to once per field and all columns are + // mentioned + var longColLogErrs []string + for _, l := range p.Logger.logs { + msg := l.String() + if l.level == pgx.LogLevelError && strings.Contains(msg, "Column name too long") { + longColLogErrs = append(longColLogErrs, strings.TrimPrefix(msg, "error: Column name too long: ")) + } + } + excpectedLongColumns := []string{ + `"a_field_with_a_some_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63"`, + `"a_field_with_another_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63"`, + } + require.ElementsMatch(t, excpectedLongColumns, longColLogErrs) + + // Denote the expected data in the table + expected := []map[string]interface{}{ + {"time": time.Unix(0, 0).Unix(), "value": int64(42)}, + {"time": time.Unix(0, 1).Unix(), "value": int64(43)}, + {"time": time.Unix(0, 2).Unix(), "value": int64(44)}, + {"time": time.Unix(0, 9).Unix(), "value": int64(45)}, + {"time": time.Unix(0, 0).Unix(), "value": int64(42)}, + {"time": time.Unix(0, 1).Unix(), "value": int64(43)}, + {"time": time.Unix(0, 2).Unix(), "value": int64(44)}, + {"time": time.Unix(0, 9).Unix(), "value": int64(45)}, + } + + // Get the actual table data nd convert the time to a timestamp for + // easier comparison + dump := 
dbTableDump(t, p.db, "") + require.Len(t, dump, len(expected)) + for i, actual := range dump { + if raw, found := actual["time"]; found { + if t, ok := raw.(time.Time); ok { + actual["time"] = t.Unix() + } + } + require.EqualValues(t, expected[i], actual) + } +} + +func TestLongColumnNamesClipIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + // Setup the plugin + p, err := newPostgresqlTest(t) + require.NoError(t, err) + p.ColumnNameLenLimit = 63 + require.NoError(t, p.Init()) + require.NoError(t, p.Connect()) + + // Define the metric to send + metrics := []telegraf.Metric{ + metric.New( + t.Name(), + map[string]string{}, + map[string]interface{}{ + "a_field_with_a_some_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63": int64(0), + "value": 42, + }, + time.Unix(0, 0).UTC(), + ), + metric.New( + t.Name(), + map[string]string{}, + map[string]interface{}{ + "a_field_with_a_some_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63": int64(1), + "value": 43, + }, + time.Unix(0, 1).UTC(), + ), + metric.New( + t.Name(), + map[string]string{}, + map[string]interface{}{ + "a_field_with_a_some_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63": int64(2), + "value": 44, + }, + time.Unix(0, 2).UTC(), + ), + metric.New( + t.Name(), + map[string]string{}, + map[string]interface{}{ + "a_field_with_another_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63": int64(99), + "value": 45, + }, + time.Unix(0, 9).UTC(), + ), + } + require.NoError(t, p.Write(metrics)) + require.NoError(t, p.Write(metrics)) + + // Check if the logging is restricted to once per field and all columns are mentioned + var longColLogWarns []string + var longColLogErrs []string + for _, l := range p.Logger.logs { + msg := l.String() + if l.level == pgx.LogLevelWarn && strings.Contains(msg, "Limiting too long column name") { + longColLogWarns = append(longColLogWarns, strings.TrimPrefix(msg, 
"warn: Limiting too long column name: ")) + continue + } + if l.level == pgx.LogLevelError && strings.Contains(msg, "Column name too long") { + longColLogErrs = append(longColLogErrs, strings.TrimPrefix(msg, "error: Column name too long: ")) + continue + } + } + + excpectedLongColumns := []string{ + `"a_field_with_a_some_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63"`, + `"a_field_with_another_very_long_name_exceeding_the_column_name_limit_of_postgres_of_63"`, + } + require.ElementsMatch(t, excpectedLongColumns, longColLogWarns) + require.Empty(t, longColLogErrs) + + // Denote the expected data in the table + expected := []map[string]interface{}{ + { + "time": time.Unix(0, 0).Unix(), + "a_field_with_a_some_very_long_name_exceeding_the_column_name_li": int64(0), + "a_field_with_another_very_long_name_exceeding_the_column_name_l": nil, + "value": int64(42), + }, + { + "time": time.Unix(0, 1).Unix(), + "a_field_with_a_some_very_long_name_exceeding_the_column_name_li": int64(1), + "a_field_with_another_very_long_name_exceeding_the_column_name_l": nil, + "value": int64(43), + }, + { + "time": time.Unix(0, 2).Unix(), + "a_field_with_a_some_very_long_name_exceeding_the_column_name_li": int64(2), + "a_field_with_another_very_long_name_exceeding_the_column_name_l": nil, + "value": int64(44), + }, + { + "time": time.Unix(0, 9).Unix(), + "a_field_with_a_some_very_long_name_exceeding_the_column_name_li": nil, + "a_field_with_another_very_long_name_exceeding_the_column_name_l": int64(99), + "value": int64(45), + }, + { + "time": time.Unix(0, 0).Unix(), + "a_field_with_a_some_very_long_name_exceeding_the_column_name_li": int64(0), + "a_field_with_another_very_long_name_exceeding_the_column_name_l": nil, + "value": int64(42), + }, + { + "time": time.Unix(0, 1).Unix(), + "a_field_with_a_some_very_long_name_exceeding_the_column_name_li": int64(1), + "a_field_with_another_very_long_name_exceeding_the_column_name_l": nil, + "value": int64(43), + }, + { + "time": 
time.Unix(0, 2).Unix(), + "a_field_with_a_some_very_long_name_exceeding_the_column_name_li": int64(2), + "a_field_with_another_very_long_name_exceeding_the_column_name_l": nil, + "value": int64(44), + }, + { + "time": time.Unix(0, 9).Unix(), + "a_field_with_a_some_very_long_name_exceeding_the_column_name_li": nil, + "a_field_with_another_very_long_name_exceeding_the_column_name_l": int64(99), + "value": int64(45), + }, + } + + // Get the actual table data and convert the time to a timestamp for + easier comparison + dump := dbTableDump(t, p.db, "") + require.Len(t, dump, len(expected)) + for i, actual := range dump { + if raw, found := actual["time"]; found { + if t, ok := raw.(time.Time); ok { + actual["time"] = t.Unix() + } + } + require.EqualValues(t, expected[i], actual) + } +} diff --git a/plugins/outputs/postgresql/sample.conf b/plugins/outputs/postgresql/sample.conf index e1a99e796c956..13cc0c32fb879 100644 --- a/plugins/outputs/postgresql/sample.conf +++ b/plugins/outputs/postgresql/sample.conf @@ -87,5 +87,11 @@ ## tag IDs. Each entry consumes approximately 34 bytes of memory. # tag_cache_size = 100000 + ## Cut column names at the given length to not exceed PostgreSQL's + ## 'identifier length' limit (default: no limit) + ## (see https://www.postgresql.org/docs/current/limits.html) + ## Be careful to not create duplicate column names! + # column_name_length_limit = 0 + ## Enable & set the log level for the Postgres driver. 
# log_level = "warn" # trace, debug, info, warn, error, none diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 7dea7f8fab71e..a0cd5a9bc123b 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -27,14 +27,20 @@ type TableManager struct { // map[tableName]map[columnName]utils.Column tables map[string]*tableState tablesMutex sync.Mutex + + // Map to track which columns are already logged + loggedLongColumnWarn map[string]bool + loggedLongColumnErr map[string]bool } // NewTableManager returns an instance of the tables.Manager interface // that can handle checking and updating the state of tables in the PG database. func NewTableManager(postgresql *Postgresql) *TableManager { return &TableManager{ - Postgresql: postgresql, - tables: make(map[string]*tableState), + Postgresql: postgresql, + tables: make(map[string]*tableState), + loggedLongColumnWarn: make(map[string]bool), + loggedLongColumnErr: make(map[string]bool), } } @@ -178,7 +184,15 @@ func (tm *TableManager) EnsureStructure( // check that the missing columns are columns that can be added addColumns := make([]utils.Column, 0, len(missingCols)) invalidColumns := make([]utils.Column, 0, len(missingCols)) - for _, col := range missingCols { + for i, col := range missingCols { + if tm.ColumnNameLenLimit > 0 && len(col.Name) > tm.ColumnNameLenLimit { + if !tm.loggedLongColumnWarn[col.Name] { + tm.Postgresql.Logger.Warnf("Limiting too long column name: %q", col.Name) + tm.loggedLongColumnWarn[col.Name] = true + } + col.Name = col.Name[:tm.ColumnNameLenLimit] + missingCols[i] = col + } if tm.validateColumnName(col.Name) { addColumns = append(addColumns, col) continue @@ -187,7 +201,10 @@ func (tm *TableManager) EnsureStructure( if col.Role == utils.TagColType { return nil, fmt.Errorf("column name too long: %q", col.Name) } - tm.Postgresql.Logger.Errorf("Column name too long: %q", col.Name) + if 
!tm.loggedLongColumnErr[col.Name] { + tm.Postgresql.Logger.Errorf("Column name too long: %q", col.Name) + tm.loggedLongColumnErr[col.Name] = true + } invalidColumns = append(invalidColumns, col) } diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index 45ccccf986765..82be5cfe2069b 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -440,8 +440,11 @@ rpc_duration_seconds_count 2693 t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) - _, err := w.Write(tt.data) - require.NoError(t, err) + if _, err := w.Write(tt.data); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } }) input := &inputs.Prometheus{ diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index 423e9d3772720..8f94995f504e2 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -469,8 +469,11 @@ rpc_duration_seconds_count 2693 t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) - _, err := w.Write(tt.data) - require.NoError(t, err) + if _, err := w.Write(tt.data); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } }) input := &inputs.Prometheus{ diff --git a/plugins/outputs/sensu/sensu_test.go b/plugins/outputs/sensu/sensu_test.go index a8b79980ed028..d14806fe6f4cf 100644 --- a/plugins/outputs/sensu/sensu_test.go +++ b/plugins/outputs/sensu/sensu_test.go @@ -7,13 +7,13 @@ import ( "math" "net/http" "net/http/httptest" + "slices" "testing" corev2 
"github.com/sensu/sensu-go/api/core/v2" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/testutil" ) @@ -116,25 +116,67 @@ func TestConnectAndWrite(t *testing.T) { t.Run("write", func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, expectedURL, r.URL.String()) - require.Equal(t, expectedAuthHeader, r.Header.Get("Authorization")) + if urlString := r.URL.String(); urlString != expectedURL { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", expectedURL, urlString) + return + } + if authHeader := r.Header.Get("Authorization"); authHeader != expectedAuthHeader { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", expectedAuthHeader, authHeader) + return + } + // let's make sure what we received is a valid Sensu event that contains all of the expected data body, err := io.ReadAll(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } receivedEvent := &corev2.Event{} - err = json.Unmarshal(body, receivedEvent) - require.NoError(t, err) - require.Equal(t, testCheck, receivedEvent.Check.Name) - require.Equal(t, testEntity, receivedEvent.Entity.Name) - require.NotEmpty(t, receivedEvent.Metrics) - require.True(t, choice.Contains(testHandler, receivedEvent.Metrics.Handlers)) - require.NotEmpty(t, receivedEvent.Metrics.Points) + + if err = json.Unmarshal(body, receivedEvent); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if receivedEvent.Check.Name != testCheck { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", testCheck, receivedEvent.Check.Name) + return + } + if receivedEvent.Entity.Name != testEntity { + 
w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", testEntity, receivedEvent.Entity.Name) + return + } + if receivedEvent.Metrics == nil { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'receivedEvent.Metrics' should not be nil") + return + } + if !slices.Contains(receivedEvent.Metrics.Handlers, testHandler) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'receivedEvent.Metrics.Handlers' should contain %q", testHandler) + return + } + if len(receivedEvent.Metrics.Points) == 0 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'receivedEvent.Metrics.Points' should not be empty") + return + } pointFound := false tagFound := false for _, p := range receivedEvent.Metrics.Points { if p.Name == expectedPointName+".value" && p.Value == expectedPointValue { pointFound = true - require.NotEmpty(t, p.Tags) + if len(p.Tags) == 0 { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'p.Tags' should not be empty") + return + } + for _, t := range p.Tags { if t.Name == testTagName && t.Value == testTagValue { tagFound = true @@ -142,8 +184,17 @@ func TestConnectAndWrite(t *testing.T) { } } } - require.True(t, pointFound) - require.True(t, tagFound) + + if !pointFound { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'pointFound' should be true") + return + } + if !tagFound { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("'tagFound' should be true") + return + } w.WriteHeader(http.StatusCreated) }) err := plugin.Write([]telegraf.Metric{testutil.TestMetric(expectedPointValue, expectedPointName)}) diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 7ce7d723e3cd6..c9b98035b00b9 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -90,7 +90,11 @@ func TestMethod(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler 
= http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, tt.expectedMethod, r.Method) + if r.Method != tt.expectedMethod { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", tt.expectedMethod, r.Method) + return + } w.WriteHeader(http.StatusOK) }) @@ -257,15 +261,27 @@ func TestContentType(t *testing.T) { var body bytes.Buffer ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { gz, err := gzip.NewReader(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } var maxDecompressionSize int64 = 500 * 1024 * 1024 n, err := io.CopyN(&body, gz, maxDecompressionSize) if errors.Is(err, io.EOF) { err = nil } - require.NoError(t, err) - require.NotEqualf(t, n, maxDecompressionSize, "size of decoded data exceeds allowed size %d", maxDecompressionSize) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if n > maxDecompressionSize { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Size of decoded data exceeds (%v) allowed size (%v)", n, maxDecompressionSize) + return + } w.WriteHeader(http.StatusOK) })) @@ -313,15 +329,30 @@ func TestContentEncodingGzip(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, "gzip", r.Header.Get("Content-Encoding")) + if r.Header.Get("Content-Encoding") != "gzip" { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "gzip", r.Header.Get("Content-Encoding")) + return + } body, err := gzip.NewReader(r.Body) - require.NoError(t, err) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } payload, err := io.ReadAll(body) - require.NoError(t, err) - - require.Equal(t, "metric=cpu field=value 42 0\n", 
string(payload)) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } + if "metric=cpu field=value 42 0\n" != string(payload) { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", "metric=cpu field=value 42 0\n", string(payload)) + return + } w.WriteHeader(http.StatusNoContent) }) @@ -352,7 +383,11 @@ func TestDefaultUserAgent(t *testing.T) { t.Run("default-user-agent", func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) + if r.Header.Get("User-Agent") != internal.ProductToken() { + w.WriteHeader(http.StatusInternalServerError) + t.Errorf("Not equal, expected: %q, actual: %q", internal.ProductToken(), r.Header.Get("User-Agent")) + return + } w.WriteHeader(http.StatusOK) }) diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go index facd0fb70aae5..512677d4cf7fa 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go @@ -32,11 +32,17 @@ func TestWrite(t *testing.T) { ExpiresIn: 123, } w.Header().Set("Content-Type", "application/json; charset=utf-8") - err := json.NewEncoder(w).Encode(token) - require.NoError(t, err) + if err := json.NewEncoder(w).Encode(token); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } else if strings.HasSuffix(r.URL.Path, "/folder") { - _, err := io.WriteString(w, "folder1") - require.NoError(t, err) + if _, err := io.WriteString(w, "folder1"); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } w.WriteHeader(http.StatusOK) }), diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go index 
7eded25d522a3..41c2760182721 100644 --- a/plugins/processors/dedup/dedup_test.go +++ b/plugins/processors/dedup/dedup_test.go @@ -1,6 +1,7 @@ package dedup import ( + "fmt" "sync" "testing" "time" @@ -457,3 +458,75 @@ func TestTracking(t *testing.T) { return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } + +func TestStatePersistence(t *testing.T) { + now := time.Now() + + // Define the metrics and states + state := fmt.Sprintf("metric,tag=value foo=1i %d\n", now.Add(-1*time.Minute).UnixNano()) + input := []telegraf.Metric{ + metric.New("metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"foo": 1}, + now.Add(-2*time.Second), + ), + metric.New("metric", + map[string]string{"tag": "pass"}, + map[string]interface{}{"foo": 1}, + now.Add(-1*time.Second), + ), + metric.New( + "metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"foo": 3}, + now, + ), + } + + expected := []telegraf.Metric{ + metric.New("metric", + map[string]string{"tag": "pass"}, + map[string]interface{}{"foo": 1}, + now.Add(-1*time.Second), + ), + metric.New( + "metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"foo": 3}, + now, + ), + } + expectedState := []string{ + fmt.Sprintf("metric,tag=pass foo=1i %d\n", now.Add(-1*time.Second).UnixNano()), + fmt.Sprintf("metric,tag=value foo=3i %d\n", now.UnixNano()), + } + + // Configure the plugin + plugin := &Dedup{ + DedupInterval: config.Duration(10 * time.Hour), // use a long interval to avoid flaky tests + FlushTime: now.Add(-1 * time.Second), + Cache: make(map[uint64]telegraf.Metric), + } + require.Empty(t, plugin.Cache) + + // Setup the "persisted" state + var pi telegraf.StatefulPlugin = plugin + require.NoError(t, pi.SetState([]byte(state))) + require.Len(t, plugin.Cache, 1) + + // Process expected metrics and compare with resulting metrics + actual := plugin.Apply(input...) 
+ testutil.RequireMetricsEqual(t, expected, actual) + + // Check getting the persisted state + // Because the cache is a map, the order of metrics in the state is not + // guaranteed, so check the string contents regardless of the order. + actualState, ok := pi.GetState().([]byte) + require.True(t, ok, "state is not a bytes array") + var expectedLen int + for _, m := range expectedState { + require.Contains(t, string(actualState), m) + expectedLen += len(m) + } + require.Len(t, actualState, expectedLen) +} diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index 61785e98b4483..ba3184e143ea2 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -120,8 +120,7 @@ func (s *Starlark) Add(origMetric telegraf.Metric, acc telegraf.Accumulator) err return nil } -func (s *Starlark) Stop() { -} +func (s *Starlark) Stop() {} func containsMetric(metrics []telegraf.Metric, target telegraf.Metric) bool { for _, m := range metrics { diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 13127cfa45ba2..e9ef5c7708253 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -3680,13 +3680,13 @@ def apply(metric): Log: testutil.Logger{}, }, } + require.NoError(t, plugin.Init()) // Setup the "persisted" state var pi telegraf.StatefulPlugin = plugin var buf bytes.Buffer require.NoError(t, gob.NewEncoder(&buf).Encode(map[string]interface{}{"instance": "myhost"})) require.NoError(t, pi.SetState(buf.Bytes())) - require.NoError(t, plugin.Init()) var acc testutil.Accumulator require.NoError(t, plugin.Start(&acc)) diff --git a/plugins/secretstores/http/http_test.go b/plugins/secretstores/http/http_test.go index 82e90b64609bb..7cb79c43015b0 100644 --- a/plugins/secretstores/http/http_test.go +++ b/plugins/secretstores/http/http_test.go @@ -62,8 +62,11 @@ func TestCases(t *testing.T) { server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/secrets" { - _, err = w.Write(input) - require.NoError(t, err) + if _, err = w.Write(input); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } } else { w.WriteHeader(http.StatusNotFound) } @@ -156,8 +159,11 @@ func TestGetErrors(t *testing.T) { func TestResolver(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - _, err := w.Write([]byte(`{"test": "aedMZXaLR246OHHjVtJKXQ=="}`)) - require.NoError(t, err) + if _, err := w.Write([]byte(`{"test": "aedMZXaLR246OHHjVtJKXQ=="}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer server.Close() @@ -198,8 +204,11 @@ func TestGetResolverErrors(t *testing.T) { dummy.Close() server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - _, err = w.Write([]byte(`[{"test": "aedMZXaLR246OHHjVtJKXQ=="}]`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`[{"test": "aedMZXaLR246OHHjVtJKXQ=="}]`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer server.Close() @@ -232,8 +241,11 @@ func TestInvalidServerResponse(t *testing.T) { defer dummy.Close() server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - _, err = w.Write([]byte(`[somerandomebytes`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`[somerandomebytes`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer server.Close() @@ -267,8 +279,11 @@ func TestAdditionalHeaders(t *testing.T) { if r.Host != "" { actual.Add("host", r.Host) } - _, err = w.Write([]byte(`{"test": "aedMZXaLR246OHHjVtJKXQ=="}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{"test": "aedMZXaLR246OHHjVtJKXQ=="}`)); err != nil { + 
w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer server.Close() @@ -310,14 +325,20 @@ func TestServerReturnCodes(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/", "/200": - _, err = w.Write([]byte(`{}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } case "/201": w.WriteHeader(201) case "/300": w.WriteHeader(300) - _, err = w.Write([]byte(`{}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } case "/401": w.WriteHeader(401) default: @@ -357,8 +378,11 @@ func TestAuthenticationBasic(t *testing.T) { var header http.Header server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { header = r.Header - _, err = w.Write([]byte(`{}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer server.Close() @@ -385,8 +409,11 @@ func TestAuthenticationToken(t *testing.T) { var header http.Header server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { header = r.Header - _, err = w.Write([]byte(`{}`)) - require.NoError(t, err) + if _, err = w.Write([]byte(`{}`)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } })) defer server.Close() diff --git a/plugins/secretstores/oauth2/oauth2_test.go b/plugins/secretstores/oauth2/oauth2_test.go index 3271488c653a4..724dec7fe356b 100644 --- a/plugins/secretstores/oauth2/oauth2_test.go +++ b/plugins/secretstores/oauth2/oauth2_test.go @@ -179,8 +179,11 @@ func TestGet(t *testing.T) { func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { - _, err := 
w.Write([]byte(err.Error())) - require.NoError(t, err) + if _, err := w.Write([]byte(err.Error())); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } w.WriteHeader(http.StatusInternalServerError) return } @@ -220,8 +223,11 @@ func TestGetMultipleTimes(t *testing.T) { func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { - _, err := w.Write([]byte(err.Error())) - require.NoError(t, err) + if _, err := w.Write([]byte(err.Error())); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } w.WriteHeader(http.StatusInternalServerError) return } @@ -267,8 +273,11 @@ func TestGetExpired(t *testing.T) { func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { - _, err := w.Write([]byte(err.Error())) - require.NoError(t, err) + if _, err := w.Write([]byte(err.Error())); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } w.WriteHeader(http.StatusInternalServerError) return } @@ -309,8 +318,11 @@ func TestGetRefresh(t *testing.T) { func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { - _, err := w.Write([]byte(err.Error())) - require.NoError(t, err) + if _, err := w.Write([]byte(err.Error())); err != nil { + w.WriteHeader(http.StatusInternalServerError) + t.Error(err) + return + } w.WriteHeader(http.StatusInternalServerError) return } diff --git a/testutil/container.go b/testutil/container.go index eeecdd620ce0e..48131d1d26d2f 100644 --- a/testutil/container.go +++ b/testutil/container.go @@ -78,9 +78,7 @@ func (c *Container) Start() error { } c.container = cntnr - c.Logs = TestLogConsumer{ - Msgs: []string{}, - } + c.Logs = TestLogConsumer{} c.container.FollowOutput(&c.Logs) err = c.container.StartLogProducer(c.ctx) if err != nil { diff --git a/testutil/testutil.go b/testutil/testutil.go index db548e94d327b..67f7f279685eb 100644 --- 
a/testutil/testutil.go +++ b/testutil/testutil.go @@ -108,3 +108,10 @@ func DefaultSampleConfig(sampleConfig string) []byte { re := regexp.MustCompile(`(?m)(^\s+)#\s*`) return []byte(re.ReplaceAllString(sampleConfig, "$1")) } + +func WithinDefaultDelta(dt float64) bool { + if dt < -DefaultDelta || dt > DefaultDelta { + return false + } + return true +} diff --git a/tools/readme_linter/main.go b/tools/readme_linter/main.go index 5c6f6af24b0a0..63e24662fa485 100644 --- a/tools/readme_linter/main.go +++ b/tools/readme_linter/main.go @@ -97,7 +97,7 @@ func checkFile(filename string, pluginType plugin, sourceFlag bool) (bool, error scanner := bufio.NewScanner(bytes.NewReader(md)) scanner.Split(bufio.ScanRunes) offset := 0 - newlineOffsets := []int{} + newlineOffsets := make([]int, 0) for scanner.Scan() { if scanner.Text() == "\n" { newlineOffsets = append(newlineOffsets, offset) diff --git a/tools/readme_linter/rules.go b/tools/readme_linter/rules.go index 421def5645a85..b883601f4b60f 100644 --- a/tools/readme_linter/rules.go +++ b/tools/readme_linter/rules.go @@ -92,7 +92,7 @@ func noLongLinesInParagraphs(threshold int) func(*T, ast.Node) error { return func(t *T, root ast.Node) error { // We're looking for long lines in paragraphs. 
Find paragraphs // first, then which lines are in paragraphs - paraLines := []int{} + paraLines := make([]int, 0) for n := root.FirstChild(); n != nil; n = n.NextSibling() { var p *ast.Paragraph var ok bool @@ -108,7 +108,7 @@ func noLongLinesInParagraphs(threshold int) func(*T, ast.Node) error { } // Find long lines in the whole file - longLines := []int{} + longLines := make([]int, 0, len(t.newlineOffsets)) last := 0 for i, cur := range t.newlineOffsets { length := cur - last - 1 // -1 to exclude the newline @@ -121,7 +121,7 @@ func noLongLinesInParagraphs(threshold int) func(*T, ast.Node) error { // Merge both lists p := 0 l := 0 - bads := []int{} + bads := make([]int, 0, max(len(paraLines), len(longLines))) for p < len(paraLines) && l < len(longLines) { long := longLines[l] para := paraLines[p]