From a4f6ff96cf7265001db5fd6713b85b6abd4a5608 Mon Sep 17 00:00:00 2001 From: Christian Galsterer Date: Sat, 14 Jan 2017 08:03:44 +0100 Subject: [PATCH] * Update to Beats 5.1.2 --- README.md | 1 + glide.lock | 6 +- glide.yaml | 2 +- main.go | 2 +- .../elastic/beats/CHANGELOG.asciidoc | 18 +- vendor/github.com/elastic/beats/Makefile | 4 + .../beats/dev-tools/packer/version.yml | 2 +- .../filebeat/docs/filebeat-filtering.asciidoc | 88 +++++- .../docs/reference/configuration.asciidoc | 2 +- .../configuration/filebeat-options.asciidoc | 4 +- .../beats/filebeat/docs/version.asciidoc | 1 + .../filebeat/filebeat.template-es2x.json | 2 +- .../beats/filebeat/filebeat.template.json | 2 +- .../beats/filebeat/registrar/registrar.go | 24 +- .../filebeat/tests/system/test_migration.py | 178 +++++++++++ .../filebeat/tests/system/test_registrar.py | 108 ------- .../beats/heartbeat/docs/version.asciidoc | 1 + .../heartbeat/heartbeat.template-es2x.json | 2 +- .../beats/heartbeat/heartbeat.template.json | 2 +- .../elastic/beats/libbeat/beat/version.go | 2 +- .../libbeat/docs/communitybeats.asciidoc | 4 + .../libbeat/docs/processors-config.asciidoc | 291 +++++++++++------- .../beats/libbeat/docs/processors.asciidoc | 17 +- .../beats/libbeat/docs/release.asciidoc | 1 + .../beats/libbeat/docs/repositories.asciidoc | 4 +- .../beats/libbeat/docs/version.asciidoc | 1 + .../elastic/beats/libbeat/scripts/Makefile | 5 +- .../libbeat/scripts/generate_index_pattern.py | 2 + .../beats/metricbeat/_meta/beat.full.yml | 2 +- .../beats/metricbeat/docs/fields.asciidoc | 36 +-- .../docs/metricbeat-filtering.asciidoc | 38 +-- .../beats/metricbeat/docs/modules.asciidoc | 23 +- .../metricbeat/docs/modules/docker.asciidoc | 6 +- .../metricbeat/docs/modules_list.asciidoc | 30 ++ .../docs/reference/configuration.asciidoc | 2 +- .../configuration/metricbeat-options.asciidoc | 10 +- .../beats/metricbeat/docs/version.asciidoc | 1 + .../beats/metricbeat/metricbeat.full.yml | 2 +- .../metricbeat/metricbeat.template-es2x.json | 2 +- .../beats/metricbeat/metricbeat.template.json | 2 +- .../module/docker/_meta/docs.asciidoc | 4 +- .../metricbeat/module/docker/_meta/fields.yml | 4 +- .../docker/container/_meta/docs.asciidoc | 5 +- .../module/docker/container/_meta/fields.yml | 8 +- .../module/docker/cpu/_meta/docs.asciidoc | 4 +- .../module/docker/cpu/_meta/fields.yml | 12 +- .../module/docker/diskio/_meta/docs.asciidoc | 4 +- .../module/docker/diskio/_meta/fields.yml | 4 +- .../beats/metricbeat/module/docker/docker.go | 26 +- .../module/docker/info/_meta/docs.asciidoc | 5 +- .../module/docker/info/_meta/fields.yml | 4 +- .../module/docker/memory/_meta/docs.asciidoc | 4 +- .../module/docker/memory/_meta/fields.yml | 2 +- .../module/docker/network/_meta/docs.asciidoc | 4 +- .../module/docker/network/_meta/fields.yml | 2 +- .../metricbeat/scripts/docs_collector.py | 17 + .../dashboard/Packetbeat-Cassandra.json | 18 +- .../docs/packetbeat-filtering.asciidoc | 2 +- .../docs/reference/configuration.asciidoc | 2 +- .../configuration/packetbeat-options.asciidoc | 5 +- .../beats/packetbeat/docs/version.asciidoc | 1 + .../packetbeat/packetbeat.template-es2x.json | 2 +- .../beats/packetbeat/packetbeat.template.json | 2 +- .../beats/testing/environments/base.yml | 4 - .../docker/kibana/Dockerfile-snapshot | 2 +- .../docker/logstash/Dockerfile-snapshot | 4 +- .../beats/testing/environments/snapshot.yml | 4 +- .../docs/reference/configuration.asciidoc | 2 +- .../configuration/winlogbeat-options.asciidoc | 37 ++- .../beats/winlogbeat/docs/version.asciidoc 
| 1 + .../beats/winlogbeat/eventlog/bench_test.go | 137 +++++++++ .../beats/winlogbeat/eventlog/eventlog.go | 26 +- .../beats/winlogbeat/eventlog/eventlogging.go | 1 + .../winlogbeat/eventlog/eventlogging_test.go | 14 +- .../beats/winlogbeat/eventlog/wineventlog.go | 61 ++-- .../winlogbeat/eventlog/wineventlog_test.go | 69 +++++ .../sys/wineventlog/syscall_windows.go | 1 + .../sys/wineventlog/wineventlog_windows.go | 4 + .../winlogbeat/winlogbeat.template-es2x.json | 2 +- .../beats/winlogbeat/winlogbeat.template.json | 2 +- 80 files changed, 991 insertions(+), 451 deletions(-) create mode 100644 vendor/github.com/elastic/beats/filebeat/tests/system/test_migration.py create mode 100644 vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc create mode 100644 vendor/github.com/elastic/beats/winlogbeat/eventlog/bench_test.go diff --git a/README.md b/README.md index 7be916d..9811232 100644 --- a/README.md +++ b/README.md @@ -144,6 +144,7 @@ This will fetch and create all images required for the build process. The hole p Feature release containing the following changes: * Various documentation improvements +* Update to beats v5.1.2 * Update to Go 1.7.4 * Fix: [Panic when using TLS configuration (Go 1.7.4)](https://github.com/christiangalsterer/httpbeat/pull/15) diff --git a/glide.lock b/glide.lock index 91a8808..ed6165c 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 93c95c0b5a550e9736c13aaf967cedef2a72b92b929cc657b8d421568db33e07 -updated: 2016-12-18T13:10:35.025185352+01:00 +hash: 109c806aeb54faf4c315370b2b546c3d1848c3425ffd53cc62d78d198fe81b58 +updated: 2017-01-14T07:23:08.287535106+01:00 imports: - name: github.com/davecgh/go-spew version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d @@ -14,7 +14,7 @@ imports: - name: github.com/eapache/queue version: ded5959c0d4e360646dc9e9908cff48666781367 - name: github.com/elastic/beats - version: 4b4106320c10fdae5ea6e3b376b9ee8d1d50162d + version: b1ff93b4ae3aa5d8032f3b8d22e4e7746aebb0f4 subpackages: - libbeat/beat - libbeat/cfgfile diff --git a/glide.yaml b/glide.yaml index 5d61a94..39f1c65 100644 --- a/glide.yaml +++ b/glide.yaml @@ -6,7 +6,7 @@ owners: homepage: https://github.com/christiangalsterer import: - package: github.com/elastic/beats - version: v5.1.1 + version: v5.1.2 subpackages: - libbeat/beat - libbeat/cfgfile diff --git a/main.go b/main.go index cc0d7e7..2a75577 100644 --- a/main.go +++ b/main.go @@ -6,7 +6,7 @@ import ( "os" ) -var version = "3.1.1" +var version = "3.2.0-SNAPSHOT" var name = "httpbeat" func main() { diff --git a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc index e43497b..8b43e37 100644 --- a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc +++ b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc @@ -8,7 +8,7 @@ // Template, add newest changes here === Beats version HEAD -https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff] +https://github.com/elastic/beats/compare/v5.1.2...5.1[Check the HEAD diff] ==== Breaking changes @@ -32,6 +32,7 @@ https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff] *Packetbeat* *Filebeat* +- Fix registry migration issue from old states were files were only harvested after second restart. 
{pull}3322[3322] *Winlogbeat* @@ -61,6 +62,21 @@ https://github.com/elastic/beats/compare/v5.1.1...5.1[Check the HEAD diff] //////////////////////////////////////////////////////////// +[[release-notes-5.1.2]] +=== Beats version 5.1.2 +https://github.com/elastic/beats/compare/v5.1.1...v5.1.2[View commits] + +==== Bugfixes + +*Packetbeat* + +- Fix error on importing dashboards due to colons in the Cassandra dashboard. {issue}3140[3140] +- Fix error on importing dashboards due to the wrong type for the geo_point fields. {pull}3147[3147] + +*Winlogbeat* + +- Fix for "The array bounds are invalid" error when reading large events. {issue}3076[3076] + [[release-notes-5.1.1]] === Beats version 5.1.1 https://github.com/elastic/beats/compare/v5.0.2...v5.1.1[View commits] diff --git a/vendor/github.com/elastic/beats/Makefile b/vendor/github.com/elastic/beats/Makefile index 17413ff..ebbebef 100644 --- a/vendor/github.com/elastic/beats/Makefile +++ b/vendor/github.com/elastic/beats/Makefile @@ -3,6 +3,7 @@ BUILD_DIR=build COVERAGE_DIR=${BUILD_DIR}/coverage BEATS=packetbeat filebeat winlogbeat metricbeat heartbeat PROJECTS=libbeat ${BEATS} +PROJECTS_ENV=libbeat metricbeat SNAPSHOT?=yes # Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection. @@ -12,6 +13,9 @@ testsuite: $(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;) #$(MAKE) -C generate test +stop-environments: + $(foreach var,$(PROJECTS_ENV),$(MAKE) -C $(var) stop-environment || exit 0;) + # Runs unit and system tests without coverage and race detection. .PHONY: test test: diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/version.yml b/vendor/github.com/elastic/beats/dev-tools/packer/version.yml index 4f280a7..b887469 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/version.yml +++ b/vendor/github.com/elastic/beats/dev-tools/packer/version.yml @@ -1 +1 @@ -version: "5.1.1" +version: "5.1.2" diff --git a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc index 0378ed1..b90673f 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc @@ -1,16 +1,32 @@ [[filtering-and-enhancing-data]] == Filtering and Enhancing the Exported Data -When your use case requires only a subset of the data exported by Filebeat or you need to add metadata, you can <>, or you can <>. +Your use case might require only a subset of the data exported by Filebeat, or +you might need to enhance the exported data (for example, by adding metadata). +Filebeat provides a couple of options for filtering and enhancing exported +data. You can: + +* <> to +configure each prospector to include or exclude specific lines or files. +* <> to configure global processing +across all data exported by Filebeat. [float] [[filebeat-filtering-overview]] -=== Filebeat Config Options for Filtering +=== Filtering at the Prospector Level + +You can specify filtering options at the prospector level to configure which +lines or files are included or excluded in the output. This allows you to +specify different filtering criteria for each prospector. -You can specify configuration options in the `filebeat` section of the config file to define regular expressions that -match the lines you want to include and/or exclude from the output. The supported options are <>, <>, and <>. 
+You configure prospector-level filtering in the `filebeat.prospectors` section +of the config file by specifying regular expressions that match the lines you +want to include and/or exclude from the output. The supported options are +<>, <>, and +<>. -For example, you can use the `include_lines` option to export any lines that start with "ERR" or "WARN": +For example, you can use the `include_lines` option to export any lines that +start with "ERR" or "WARN": [source,yaml] ------------------------------------------------------------------------------------- @@ -21,9 +37,11 @@ filebeat.prospectors: include_lines: ["^ERR", "^WARN"] ------------------------------------------------------------------------------------- -The disadvantage of this approach is that you need to implement a configuration option for each filtering criteria that you need. +The disadvantage of this approach is that you need to implement a +configuration option for each filtering criteria that you need. -See <> for more information about each option. +See <> for more +information about each option. [float] [[defining-processors]] @@ -31,7 +49,11 @@ See <> for more i include::../../libbeat/docs/processors.asciidoc[] -For example, the following configuration drops all the DEBUG messages. +[float] +[[drop-event-example]] +==== Drop Event Example + +The following configuration drops all the DEBUG messages. [source,yaml] ----------------------------------------------------- @@ -53,4 +75,54 @@ processors: source: "test" ---------------- +[float] +[[decode-json-example]] +==== Decode JSON Example + +In the following example, the fields exported by Filebeat include a +field, `inner`, whose value is a JSON object encoded as a string: + +[source,json] +----------------------------------------------------- +{ "outer": "value", "inner": "{\"data\": \"value\"}" } +----------------------------------------------------- + +The following configuration decodes the inner JSON object: + +[source,yaml] +----------------------------------------------------- +filebeat.prospectors: +- paths: + - input.json + json.keys_under_root: true + +processors: + - decode_json_fields: + fields: ["inner"] + +output.console.pretty: true +----------------------------------------------------- + +The resulting output looks something like this: + +["source","json",subs="attributes"] +----------------------------------------------------- +{ + "@timestamp": "2016-12-06T17:38:11.541Z", + "beat": { + "hostname": "host.example.com", + "name": "host.example.com", + "version": "{version}" + }, + "inner": { + "data": "value" + }, + "input_type": "log", + "offset": 55, + "outer": "value", + "source": "input.json", + "type": "log" +} +----------------------------------------------------- + See <> for more information. 
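For illustration, the prospector-level options and the global processors described above can be combined in a single configuration; the `/var/log/app.log` path below is a hypothetical example, not taken from the docs:

[source,yaml]
-----------------------------------------------------
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/app.log             # hypothetical path, for illustration only
  include_lines: ["^ERR", "^WARN"] # prospector-level: keep only ERR/WARN lines

processors:
  - drop_fields:
      fields: ["input_type"]       # global: drop a field from every exported event
-----------------------------------------------------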
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration.asciidoc index 8ab66d0..309158a 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration.asciidoc @@ -12,7 +12,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes * <> * <> * <> -* <> * <> * <> * <> @@ -22,6 +21,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes * <> * <> * <> +* <> include::configuration/filebeat-options.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc index 17d81c6..2e384f8 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/reference/configuration/filebeat-options.asciidoc @@ -560,11 +560,11 @@ filebeat.shutdown_timeout: 5s include::../../../../libbeat/docs/generalconfig.asciidoc[] -include::../../../../libbeat/docs/processors-config.asciidoc[] - include::../../../../libbeat/docs/outputconfig.asciidoc[] include::../../../../libbeat/docs/shared-path-config.asciidoc[] include::../../../../libbeat/docs/loggingconfig.asciidoc[] +include::../../../../libbeat/docs/processors-config.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc index 07dfbaf..ec0507b 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/version.asciidoc @@ -1,2 +1,3 @@ :stack-version: 5.1.1 :doc-branch: 5.1 +:go-version: 1.7.1 diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json b/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json index c91a6d6..f8faf52 100644 --- a/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/filebeat/filebeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.template.json b/vendor/github.com/elastic/beats/filebeat/filebeat.template.json index a49b719..4b589c4 100644 --- a/vendor/github.com/elastic/beats/filebeat/filebeat.template.json +++ b/vendor/github.com/elastic/beats/filebeat/filebeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/filebeat/registrar/registrar.go b/vendor/github.com/elastic/beats/filebeat/registrar/registrar.go index bc2a98c..5ac9ff8 100644 --- a/vendor/github.com/elastic/beats/filebeat/registrar/registrar.go +++ b/vendor/github.com/elastic/beats/filebeat/registrar/registrar.go @@ -118,15 +118,7 @@ func (r *Registrar) loadStates() error { return fmt.Errorf("Error decoding states: %s", err) } - // Set all states to finished and disable TTL on restart - // For all states covered by a prospector, TTL will be overwritten with the prospector value - for key, state := range states { - state.Finished = true - // Set ttl to -2 to easily spot which states are not managed by a prospector - state.TTL = -2 - states[key] = state - } 
- + states = resetStates(states) r.states.SetStates(states) logp.Info("States Loaded from registrar: %+v", len(states)) @@ -176,6 +168,7 @@ func (r *Registrar) loadAndConvertOldState(f *os.File) bool { // Convert old states to new states logp.Info("Old registry states found: %v", len(oldStates)) states := convertOldStates(oldStates) + states = resetStates(states) r.states.SetStates(states) // Rewrite registry in new format @@ -186,6 +179,19 @@ func (r *Registrar) loadAndConvertOldState(f *os.File) bool { return true } +// resetStates sets all states to finished and disable TTL on restart +// For all states covered by a prospector, TTL will be overwritten with the prospector value +func resetStates(states []file.State) []file.State { + + for key, state := range states { + state.Finished = true + // Set ttl to -2 to easily spot which states are not managed by a prospector + state.TTL = -2 + states[key] = state + } + return states +} + func convertOldStates(oldStates map[string]file.State) []file.State { // Convert old states to new states states := []file.State{} diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_migration.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_migration.py new file mode 100644 index 0000000..62fe99c --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_migration.py @@ -0,0 +1,178 @@ +from filebeat import BaseTest + +import os +import platform +import time +import shutil +import json +import stat +from nose.plugins.skip import Skip, SkipTest + + +class Test(BaseTest): + + def test_migration_non_windows(self): + """ + Tests if migration from old filebeat registry to new format works + """ + + if os.name == "nt": + raise SkipTest + + registry_file = self.working_dir + '/registry' + + # Write old registry file + with open(registry_file, 'w') as f: + f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}}') + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/input*", + clean_removed="false", + clean_inactive="0", + ) + + filebeat = self.start_beat() + + self.wait_until( + lambda: self.log_contains("Old registry states found: 2"), + max_timeout=15) + + self.wait_until( + lambda: self.log_contains("Old states converted to new states and written to registrar: 2"), + max_timeout=15) + + filebeat.check_kill_and_wait() + + # Check if content is same as above + assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4 + assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6 + + # Compare first entry + oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}}') + newJson = self.get_registry_entry_by_path("logs/hello.log") + del newJson["timestamp"] + del newJson["ttl"] + assert newJson == oldJson + + # Compare second entry + oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}') + newJson = self.get_registry_entry_by_path("logs/log2.log") + del newJson["timestamp"] + del newJson["ttl"] + assert newJson == oldJson + + # Make sure the right number of entries is in + data = self.get_registry() + assert len(data) == 2 + + def test_migration_windows(self): + """ + Tests if migration from old filebeat registry to new format works + """ + + if os.name != "nt": + raise SkipTest + + 
registry_file = self.working_dir + '/registry' + + # Write old registry file + with open(registry_file, 'w') as f: + f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}}') + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/input*", + ) + + filebeat = self.start_beat() + + self.wait_until( + lambda: self.log_contains("Old registry states found: 2"), + max_timeout=15) + + self.wait_until( + lambda: self.log_contains("Old states converted to new states and written to registrar: 2"), + max_timeout=15) + + filebeat.check_kill_and_wait() + + # Check if content is same as above + assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4 + assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6 + + # Compare first entry + oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}}') + newJson = self.get_registry_entry_by_path("logs/hello.log") + del newJson["timestamp"] + del newJson["ttl"] + assert newJson == oldJson + + # Compare second entry + oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}') + newJson = self.get_registry_entry_by_path("logs/log2.log") + del newJson["timestamp"] + del newJson["ttl"] + assert newJson == oldJson + + # Make sure the right number of entries is in + data = self.get_registry() + assert len(data) == 2 + + def test_migration_continue_reading(self): + """ + Tests if after the migration filebeat keeps reading the file + """ + + os.mkdir(self.working_dir + "/log/") + testfile1 = self.working_dir + "/log/test.log" + + with open(testfile1, 'w') as f: + f.write("entry10\n") + + registry_file = self.working_dir + '/registry' + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + output_file_filename="filebeat_1", + ) + + # Run filebeat to create a registry + filebeat = self.start_beat(output="filebeat1.log") + self.wait_until( + lambda: self.output_has(lines=1, output_file="output/filebeat_1"), + max_timeout=10) + filebeat.check_kill_and_wait() + + # Create old registry file out of the new one + r = self.get_registry() + registry_entry = r[0] + del registry_entry["timestamp"] + del registry_entry["ttl"] + old_registry = {registry_entry["source"]: registry_entry} + + # Overwrite registry + with open(registry_file, 'w') as f: + json.dump(old_registry, f) + + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + output_file_filename="filebeat_2", + ) + + filebeat = self.start_beat(output="filebeat2.log") + + # Wait until state is migrated + self.wait_until( + lambda: self.log_contains( + "Old states converted to new states and written to registrar: 1", "filebeat2.log"), + max_timeout=10) + + with open(testfile1, 'a') as f: + f.write("entry12\n") + + # After restart new output file is created -> only 1 new entry + self.wait_until( + lambda: self.output_has(lines=1, output_file="output/filebeat_2"), + max_timeout=10) + + filebeat.check_kill_and_wait() diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py index 8133176..159fe81 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py @@ 
-624,114 +624,6 @@ def test_state_after_rotation_ignore_older(self): assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 9 assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 8 - - def test_migration_non_windows(self): - """ - Tests if migration from old filebeat registry to new format works - """ - - if os.name == "nt": - raise SkipTest - - registry_file = self.working_dir + '/registry' - - # Write old registry file - with open(registry_file, 'w') as f: - f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}}') - - self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/input*", - clean_removed="false", - clean_inactive="0", - ) - - filebeat = self.start_beat() - - self.wait_until( - lambda: self.log_contains("Old registry states found: 2"), - max_timeout=15) - - self.wait_until( - lambda: self.log_contains("Old states converted to new states and written to registrar: 2"), - max_timeout=15) - - filebeat.check_kill_and_wait() - - # Check if content is same as above - assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4 - assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6 - - # Compare first entry - oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}}') - newJson = self.get_registry_entry_by_path("logs/hello.log") - del newJson["timestamp"] - del newJson["ttl"] - assert newJson == oldJson - - # Compare second entry - oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}') - newJson = self.get_registry_entry_by_path("logs/log2.log") - del newJson["timestamp"] - del newJson["ttl"] - assert newJson == oldJson - - # Make sure the right number of entries is in - data = self.get_registry() - assert len(data) == 2 - - def test_migration_windows(self): - """ - Tests if migration from old filebeat registry to new format works - """ - - if os.name != "nt": - raise SkipTest - - registry_file = self.working_dir + '/registry' - - # Write old registry file - with open(registry_file, 'w') as f: - f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}}') - - self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/input*", - ) - - filebeat = self.start_beat() - - self.wait_until( - lambda: self.log_contains("Old registry states found: 2"), - max_timeout=15) - - self.wait_until( - lambda: self.log_contains("Old states converted to new states and written to registrar: 2"), - max_timeout=15) - - filebeat.check_kill_and_wait() - - # Check if content is same as above - assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4 - assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6 - - # Compare first entry - oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}}') - newJson = self.get_registry_entry_by_path("logs/hello.log") - del newJson["timestamp"] - del newJson["ttl"] - assert newJson == oldJson - - # Compare second entry - oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}') - newJson = 
self.get_registry_entry_by_path("logs/log2.log") - del newJson["timestamp"] - del newJson["ttl"] - assert newJson == oldJson - - # Make sure the right number of entries is in - data = self.get_registry() - assert len(data) == 2 - - def test_clean_inactive(self): """ Checks that states are properly removed after clean_inactive diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc index 07dfbaf..ec0507b 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/version.asciidoc @@ -1,2 +1,3 @@ :stack-version: 5.1.1 :doc-branch: 5.1 +:go-version: 1.7.1 diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json index 1112efa..730f382 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json index 1cf3399..2a2660a 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/libbeat/beat/version.go b/vendor/github.com/elastic/beats/libbeat/beat/version.go index def1eb8..8cdf225 100644 --- a/vendor/github.com/elastic/beats/libbeat/beat/version.go +++ b/vendor/github.com/elastic/beats/libbeat/beat/version.go @@ -1,3 +1,3 @@ package beat -const defaultBeatVersion = "5.1.1" +const defaultBeatVersion = "5.1.2" diff --git a/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc index 4e7ac22..a33191b 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc @@ -7,9 +7,11 @@ out some of them here. NOTE: Elastic provides no warranty or support for community-sourced Beats. [horizontal] +https://github.com/awormuth/amazonbeat[amazonbeat]:: Reads data from a specified Amazon product. https://github.com/radoondas/apachebeat[apachebeat]:: Reads status from Apache HTTPD server-status. https://github.com/goomzee/burrowbeat[burrowbeat]:: Monitors Kafka consumer lag using Burrow. https://github.com/goomzee/cassandrabeat[cassandrabeat]:: Uses Cassandra's nodetool cfstats utility to monitor Cassandra database nodes and lag. +https://github.com/hartfordfive/cloudflarebeat[cloudflarebeat]:: Indexes log entries from the Cloudflare Enterprise Log Share API. https://github.com/aidan-/cloudtrailbeat[cloudtrailbeat]:: Reads events from Amazon Web Services' https://aws.amazon.com/cloudtrail/[CloudTrail]. https://github.com/Pravoru/consulbeat[consulbeat]:: Reads services health checks from consul and pushes them to elastic. 
https://github.com/Ingensi/dockbeat[dockbeat]:: Reads Docker container @@ -27,6 +29,7 @@ https://github.com/radoondas/jmxproxybeat[jmxproxybeat]:: Reads Tomcat JMX metri https://github.com/mheese/journalbeat[journalbeat]:: Used for log shipping from systemd/journald based Linux systems. https://github.com/eskibars/lmsensorsbeat[lmsensorsbeat]:: Collects data from lm-sensors (such as CPU temperatures, fan speeds, and voltages from i2c and smbus). https://github.com/consulthys/logstashbeat[logstashbeat]:: Collects data from Logstash monitoring API (v5 onwards) and indexes them in Elasticsearch. +https://github.com/scottcrespo/mongobeat[mongobeat]:: Monitors MongoDB instances and can be configured to send multiple document types to Elasticsearch. https://github.com/adibendahan/mysqlbeat[mysqlbeat]:: Run any query on MySQL and send results to Elasticsearch. https://github.com/PhaedrusTheGreek/nagioscheckbeat[nagioscheckbeat]:: For Nagios checks and performance data. https://github.com/mrkschan/nginxbeat[nginxbeat]:: Reads status from Nginx. @@ -44,6 +47,7 @@ https://github.com/martinhoefling/saltbeat[saltbeat]:: Reads events from salt ma https://github.com/consulthys/springbeat[springbeat]:: Collects health and metrics data from Spring Boot applications running with the actuator module. https://github.com/buehler/go-elastic-twitterbeat[twitterbeat]:: Reads tweets for specified screen names. https://github.com/gravitational/udpbeat[udpbeat]:: Ships structured logs via UDP. +https://github.com/hartfordfive/udplogbeat[udplogbeat]:: Accept events via local UDP socket (in plain-text or JSON with ability to enforce schemas). Can also be used for applications only supporting syslog logging. https://github.com/cleesmith/unifiedbeat[unifiedbeat]:: Reads records from Unified2 binary files generated by network intrusion detection software and indexes the records in Elasticsearch. https://github.com/mrkschan/uwsgibeat[uwsgibeat]:: Reads stats from uWSGI. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc index e994b95..c1201dc 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/processors-config.asciidoc @@ -11,22 +11,21 @@ ////////////////////////////////////////////////////////////////////////// [[configuration-processors]] -=== Processors Configuration +== Processors include::../../libbeat/docs/processors.asciidoc[] -Each processor has associated an action with a set of parameters and optionally a condition. If the condition is -present, then the action is executed only if the condition -is fulfilled. If no condition is passed then the action is always executed. +To define a processor, you specify the processor name, an optional condition, +and a set of parameters: [source,yaml] ------ processors: - - : + - : when: - - : + - : when: @@ -34,21 +33,33 @@ processors: ------ -where can be a way to select the fields that are exported or a way to add meta data to the event , contains the definition of the condition. -and is the list of parameters passed along the . +Where: + +* specifies a <> that performs some kind +of action, such as selecting the fields that are exported or adding metadata to +the event. +* specifies an optional <>. If the +condition is present, then the action is executed only if the condition is +fulfilled. If no condition is passed, then the action is always executed. 
+* is the list of parameters to pass to the processor. See <> for specific {beatname_uc} examples. -[[filtering-condition]] -==== Condition +[float] +[[conditions]] +=== Conditions -Each condition receives a field to compare or multiple fields under the same condition and then `AND` is used between -them. You can see a list of the <>. +Each condition receives a field to compare. You can specify multiple fields +under the same condition by using `AND` between the fields (for example, +`field1 AND field2`). -For each field, you can specify a simple field name or a nested map, for example `dns.question.name`. +For each field, you can specify a simple field name or a nested map, for +example `dns.question.name`. +See <> for a list of all the fields that are exported by +{beatname_uc}. -A condition can be: +The supported conditions are: * <> * <> @@ -59,14 +70,15 @@ A condition can be: * <> - +[float] [[condition-equals]] -===== equals +==== equals -With the `equals` condition, you can compare if a field has a certain value. The condition accepts only an integer or a string -value. +With the `equals` condition, you can compare if a field has a certain value. +The condition accepts only an integer or a string value. -For example, the following condition checks if the response code of the HTTP transaction is 200: +For example, the following condition checks if the response code of the HTTP +transaction is 200: [source,yaml] ------- @@ -74,13 +86,15 @@ equals: http.response.code: 200 ------- +[float] [[condition-contains]] -===== contains +==== contains The `contains` condition checks if a value is part of a field. The field can be a string or an array of strings. The condition accepts only a string value. -For example, the following condition checks if an error is part of the transaction status: +For example, the following condition checks if an error is part of the +transaction status: [source,yaml] ------ @@ -88,13 +102,15 @@ contains: status: "Specific error" ------ - +[float] [[condition-regexp]] -===== regexp +==== regexp -The `regexp` condition checks the field against a regular expression. The condition accepts only strings. +The `regexp` condition checks the field against a regular expression. The +condition accepts only strings. -For example, the following condition checks if the process name starts with `foo`: +For example, the following condition checks if the process name starts with +`foo`: [source,yaml] ----- @@ -102,14 +118,16 @@ reqexp: system.process.name: "foo.*" ----- +[float] [[condition-range]] -===== range +==== range -The `range` condition checks if the field is in a certain range of values. The condition supports `lt`, `lte`, `gt` and `gte`. The -condition accepts only integer or float values. +The `range` condition checks if the field is in a certain range of values. The +condition supports `lt`, `lte`, `gt` and `gte`. The condition accepts only +integer or float values. -For example, the following condition checks for failed HTTP transaction by comparing the `http.response.code` field with -400. +For example, the following condition checks for failed HTTP transactions by +comparing the `http.response.code` field with 400. [source,yaml] @@ -119,7 +137,7 @@ range: gte: 400 ------ -that can be also translated to: +This can also be written as: [source,yaml] ---- @@ -127,7 +145,8 @@ range: http.response.code.gte: 400 ---- -For example, the following condition checks if the CPU usage in percentage has a value between 0.5 and 0.8. 
+The following condition checks if the CPU usage in percentage has a value +between 0.5 and 0.8. [source,yaml] ------ @@ -137,8 +156,9 @@ range: ------ +[float] [[condition-or]] -===== OR +==== OR The `or` operator receives a list of conditions. @@ -152,7 +172,8 @@ or: ------- -For example the condition `http.response.code = 304 OR http.response.code = 404` translates to: +For example, to configure the condition +`http.response.code = 304 OR http.response.code = 404`: [source,yaml] ------ @@ -163,9 +184,9 @@ or: http.response.code: 404 ------ - +[float] [[condition-and]] -===== AND +==== AND The `and` operator receives a list of conditions. @@ -179,7 +200,8 @@ and: ------- -For example the condition `http.response.code = 200 AND status = OK` translates to: +For example, to configure the condition +`http.response.code = 200 AND status = OK`: [source,yaml] ------ @@ -202,8 +224,9 @@ or: ------ +[float] [[condition-not]] -===== NOT +==== NOT The `not` operator receives the condition to negate. @@ -214,7 +237,7 @@ not: ------- -For example the condition `NOT status = OK` translates to: +For example, to configure the condition `NOT status = OK`: [source,yaml] ------ @@ -223,81 +246,24 @@ not: status: OK ------ +[float] +[[processors]] +=== Processors +The supported processors are: -==== Actions - -The supported actions are: - - * <> - * <> - * <> * <> - -See <> for the full list of possible fields. - -[[include-fields]] -===== include_fields - -The `include_fields` action specifies what fields to export if a certain condition is fulfilled. The condition is -optional and if it's missing then the defined fields are always exported. The `@timestamp` and -`type` fields are always exported, even if they are not defined in the `include_fields` list. - -[source,yaml] -------- -processors: - - include_fields: - when: - condition - fields: ["field1", "field2", ...] -------- - -You can specify multiple `include_fields` actions under the `processors` section. - - -NOTE: If you define an empty list of fields under `include_fields`, then only the required fields, `@timestamp` and `type`, are -exported. - - -[[drop-fields]] -===== drop_fields - -The `drop_fields` action specifies what fields to drop if a certain condition is fulfilled. The condition is optional -and if it's missing then the defined fields are always dropped. The `@timestamp` and `type` fields cannot be dropped, -even if they show up in the `drop_fields` list. - -[source,yaml] ------------------------------------------------------ -processors: - - drop_fields: - when: - condition - fields: ["field1", "field2", ...] ------------------------------------------------------ - -NOTE: If you define an empty list of fields under `drop_fields`, then no fields are dropped. - - -[[drop-event]] -===== drop_event - -The `drop_event` action drops the entire event if the associated condition is fulfilled. The condition is mandatory, as -without one all the events are dropped. - -[source,yaml] ------- -processors: - - drop_event: - when: - condition ------- + * <> + * <> + * <> + * <> [[add-cloud-metadata]] -===== add_cloud_metadata +=== add_cloud_metadata -The `add_cloud_metadata` action enriches each event with instance metadata from -the machine's hosting provider. At startup it will detect the hosting provider -and cache the instance metadata. +The `add_cloud_metadata` processor enriches each event with instance metadata +from the machine's hosting provider. At startup it will detect the hosting +provider and cache the instance metadata. 
Three cloud providers are supported. @@ -308,14 +274,16 @@ Three cloud providers are supported. The simple configuration below enables the processor. [source,yaml] --------------------------------------------------------------------------------- +------------------------------------------------------------------------------- processors: - add_cloud_metadata: --------------------------------------------------------------------------------- +------------------------------------------------------------------------------- -The `add_cloud_metadata` action has one optional configuration setting named +The `add_cloud_metadata` processor has one optional configuration setting named `timeout` that specifies the maximum amount of time to wait for a successful -response when detecting the hosting provider. The default timeout value is `3s`. +response when detecting the hosting provider. The default timeout value is +`3s`. + If a timeout occurs then no instance metadata will be added to the events. This makes it possible to enable this processor for all your deployments (in the cloud or on-premise). @@ -326,7 +294,7 @@ examples for each of the supported providers. _EC2_ [source,json] --------------------------------------------------------------------------------- +------------------------------------------------------------------------------- { "meta": { "cloud": { @@ -338,12 +306,12 @@ _EC2_ } } } --------------------------------------------------------------------------------- +------------------------------------------------------------------------------- _Digital Ocean_ [source,json] --------------------------------------------------------------------------------- +------------------------------------------------------------------------------- { "meta": { "cloud": { @@ -353,12 +321,12 @@ _Digital Ocean_ } } } --------------------------------------------------------------------------------- +------------------------------------------------------------------------------- _GCE_ [source,json] --------------------------------------------------------------------------------- +------------------------------------------------------------------------------- { "meta": { "cloud": { @@ -370,4 +338,91 @@ _GCE_ } } } --------------------------------------------------------------------------------- +------------------------------------------------------------------------------- + +[[decode-json-fields]] +=== decode_json_fields + +The `decode_json_fields` processor decodes fields containing JSON strings and +replaces the strings with valid JSON objects. + +[source,yaml] +----------------------------------------------------- +processors: + - decode_json_fields: + fields: ["field1", "field2", ...] + process_array: false + max_depth: 1 +----------------------------------------------------- + +The `decode_json_fields` processor has the following configuration settings: + +`fields`:: The fields containing JSON strings to decode. +`process_array`:: (Optional) A boolean that specifies whether to process +arrays. The default is false. +`max_depth`:: (Optional) The maximum parsing depth. The default is 1. + +[[drop-event]] +=== drop_event + +The `drop_event` processor drops the entire event if the associated condition +is fulfilled. The condition is mandatory, because without one, all the events +are dropped. + +[source,yaml] +------ +processors: + - drop_event: + when: + condition +------ + +See <> for a list of supported conditions. 
+ +[[drop-fields]] +=== drop_fields + +The `drop_fields` processor specifies which fields to drop if a certain +condition is fulfilled. The condition is optional. If it's missing, the +specified fields are always dropped. The `@timestamp` and `type` fields cannot +be dropped, even if they show up in the `drop_fields` list. + +[source,yaml] +----------------------------------------------------- +processors: + - drop_fields: + when: + condition + fields: ["field1", "field2", ...] +----------------------------------------------------- + +See <> for a list of supported conditions. + +NOTE: If you define an empty list of fields under `drop_fields`, then no fields +are dropped. + +[[include-fields]] +=== include_fields + +The `include_fields` processor specifies which fields to export if a certain +condition is fulfilled. The condition is optional. If it's missing, the +specified fields are always exported. The `@timestamp` and `type` fields are +always exported, even if they are not defined in the `include_fields` list. + +[source,yaml] +------- +processors: + - include_fields: + when: + condition + fields: ["field1", "field2", ...] +------- + +See <> for a list of supported conditions. + +You can specify multiple `include_fields` processors under the `processors` +section. + +NOTE: If you define an empty list of fields under `include_fields`, then only +the required fields, `@timestamp` and `type`, are exported. + diff --git a/vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc index 9949164..923c1e5 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/processors.asciidoc @@ -9,15 +9,18 @@ //// include::../../libbeat/docs/filtering.asciidoc[] ////////////////////////////////////////////////////////////////////////// -You can define processors in your configuration to process events before they are sent to the configured output. -The libbeat library provides processors for reducing the number of exported fields, and processors for -enhancing events with additional metadata. Each processor receives an event, applies a defined action to the event, -and returns the event. If you define a list of processors, they are executed in the order they are defined in the -configuration file. +You can define processors in your configuration to process events before they +are sent to the configured output.The libbeat library provides processors for: + +* reducing the number of exported fields +* enhancing events with additional metadata +* performing additional processing and decoding + +Each processor receives an event, applies a defined action to the event, and +returns the event. If you define a list of processors, they are executed in the +order they are defined in the {beatname_uc} configuration file. [source,yaml] ------- event -> processor 1 -> event1 -> processor 2 -> event2 ... ------- - -The processors are defined in the {beatname_uc} configuration file. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc index 29a0185..eb2a844 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc @@ -6,6 +6,7 @@ -- This section summarizes the changes in each release. 
+* <> * <> * <> * <> diff --git a/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc index 2db4276..ea78654 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc @@ -34,7 +34,7 @@ sudo apt-get install apt-transport-https + ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -echo "deb https://artifacts.elastic.co/packages/5.x-prerelease/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-5.x.list +echo "deb https://artifacts.elastic.co/packages/5.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-5.x.list -------------------------------------------------- + [WARNING] @@ -85,7 +85,7 @@ your `/etc/yum.repos.d/` directory and add the following lines: -------------------------------------------------- [elastic-5.x] name=Elastic repository for 5.x packages -baseurl=https://artifacts.elastic.co/packages/5.x-prerelease/yum +baseurl=https://artifacts.elastic.co/packages/5.x/yum gpgcheck=1 gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch enabled=1 diff --git a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc index 07dfbaf..ec0507b 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc @@ -1,2 +1,3 @@ :stack-version: 5.1.1 :doc-branch: 5.1 +:go-version: 1.7.1 diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/Makefile b/vendor/github.com/elastic/beats/libbeat/scripts/Makefile index d410306..2f79609 100755 --- a/vendor/github.com/elastic/beats/libbeat/scripts/Makefile +++ b/vendor/github.com/elastic/beats/libbeat/scripts/Makefile @@ -43,7 +43,8 @@ TEST_ENVIRONMENT?=false SYSTEM_TESTS?=false GOX_OS?=linux darwin windows solaris freebsd netbsd openbsd TESTING_ENVIRONMENT?=snapshot -DOCKER_COMPOSE?=docker-compose -f ${PWD}/../testing/environments/base.yml -f ${PWD}/../testing/environments/${TESTING_ENVIRONMENT}.yml -f docker-compose.yml +DOCKER_COMPOSE_PROJECT_NAME?=${BEATNAME}_${TESTING_ENVIRONMENT} +DOCKER_COMPOSE?=docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} -f ${PWD}/../testing/environments/base.yml -f ${PWD}/../testing/environments/${TESTING_ENVIRONMENT}.yml -f docker-compose.yml DOCKER_CACHE?=1 # If set to 0, all docker images are created without cache GOPACKAGES_COMMA_SEP=$(subst $(space),$(comma),$(strip ${GOPACKAGES})) PYTHON_ENV?=${BUILD_DIR}/python-env @@ -304,7 +305,7 @@ start-environment: stop-environment .PHONY: stop-environment stop-environment: -${DOCKER_COMPOSE} stop - -${DOCKER_COMPOSE} rm -f -v -a + -${DOCKER_COMPOSE} rm -f -v .PHONY: write-environment write-environment: diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py b/vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py index e719292..8bd2355 100644 --- a/vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py +++ b/vendor/github.com/elastic/beats/libbeat/scripts/generate_index_pattern.py @@ -60,6 +60,8 @@ def field_to_json(desc, path, output): field["aggregatable"] = False elif desc["type"] == "date": field["type"] = "date" + elif desc["type"] == "geo_point": + field["type"] = "geo_point" else: field["type"] = "string" diff --git a/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml 
b/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml index a915b2b..8bcb0c7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml +++ b/vendor/github.com/elastic/beats/metricbeat/_meta/beat.full.yml @@ -67,7 +67,7 @@ metricbeat.modules: # Password of hosts. Empty by default #password: test123 -#------------------------------- docker Module ------------------------------- +#------------------------------- Docker Module ------------------------------- #- module: docker #metricsets: ["cpu", "info", "memory", "network", "diskio", "container"] #hosts: ["unix:///var/run/docker.sock"] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc index 00e568f..d025cac 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc @@ -506,7 +506,7 @@ The document type. Always set to "metricsets". [[exported-fields-docker]] -== docker Fields +== Docker Fields experimental[] Docker stats collected from Docker. @@ -516,14 +516,14 @@ Docker stats collected from Docker. [float] == docker Fields -docker contains different informations and statistics of docker's containers running +Information and statistics about docker's running containers. [float] == container Fields -Docker container metrics +Docker container metrics. @@ -532,7 +532,7 @@ Docker container metrics type: keyword -Executed command in docker container. +Command that was executed in the Docker container. [float] @@ -540,7 +540,7 @@ Executed command in docker container. type: date -Date then the container was created. +Date when the container was created. [float] @@ -595,13 +595,13 @@ Total size of all the files in the container. type: long -Size of the files which have been created or changed since creation. +Size of the files that have been created or changed since creation. [float] == cpu Fields -Runtime cpu metrics. +Runtime CPU metrics. @@ -612,7 +612,7 @@ type: scaled_float format: percentage -The system kernel consumed by The Docker server. +The system kernel consumed by the Docker server. [float] @@ -620,7 +620,7 @@ The system kernel consumed by The Docker server. type: long -Cpu kernel tikcs +CPU kernel ticks. [float] @@ -637,7 +637,7 @@ format: percentage type: long -Cpu system tikcs +CPU system ticks. [float] @@ -654,7 +654,7 @@ format: percentage type: long -Cpu user tikcs +CPU user ticks [float] @@ -664,13 +664,13 @@ type: scaled_float format: percentage -Total cpu usage. +Total CPU usage. [float] == diskio Fields -Diskio metrics. +Disk I/O metrics. @@ -695,14 +695,14 @@ Number of writes. type: scaled_float -Reads and writes numbers combined. +Number of reads and writes combined. [float] == info Fields experimental[] -info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information +Info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information. @@ -750,7 +750,7 @@ Total number of existing containers. type: keyword -Unique docker host identifier. +Unique Docker host identifier. [float] @@ -789,7 +789,7 @@ Memory limit. [float] == rss Fields -Rss memory stats. +RSS memory stats. @@ -849,7 +849,7 @@ Total memory usage. [float] == network Fields -Netowrk metrics. +Network metrics. 
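For reference, the commented Docker module template in `metricbeat.full.yml` above corresponds to roughly the following configuration once enabled; the `period` value is an assumed setting, not part of the commented template:

[source,yaml]
-----------------------------------------------------
metricbeat.modules:
- module: docker
  metricsets: ["cpu", "info", "memory", "network", "diskio", "container"]
  hosts: ["unix:///var/run/docker.sock"]
  period: 10s  # assumed polling interval; adjust as needed
-----------------------------------------------------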
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc index 82eb123..e312d18 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc @@ -1,44 +1,10 @@ [[filtering-and-enhancing-data]] == Filtering and Enhancing the Exported Data -When your use case requires only a subset of the data exported by Metricbeat or you need to add metadata, -you can <>, or you can <>. - - -[float] -[[metricbeat-filtering-overview]] -=== Metricbeat Module Filtering - -Each module accepts a list of filters in its configuration. These filters are -applied to the data generated by the module. These filters are applied to the -module data prior to the addition of the common beat fields like `beat.hostname` -and `type` so they can only be used to filter fields from the module. - -The following example reduces the exported fields of the Redis module to -include only the `redis.info.memory` fields. - -[source,yaml] ----- -metricbeat.modules: -- module: redis - metricsets: ["info"] - period: 1s - hosts: ["127.0.0.1:6379"] - enabled: true - filters: - - include_fields: - fields: ['memory'] ----- - -[float] -[[defining-processors]] -=== Defining Processors - include::../../libbeat/docs/processors.asciidoc[] -For example, the following filters configuration reduces the exported fields by -dropping the `beat.name` and `beat.hostname` fields under `beat`, from all documents. +For example, the following configuration reduces the exported fields by +dropping the `beat.name` and `beat.hostname` fields under `beat` from all documents. [source, yaml] ---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc index d2e7d8e..ea57298 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules.asciidoc @@ -7,28 +7,7 @@ This section contains detailed information about the metric collecting modules contained in {beatname_uc}. Each module contains one or multiple metricsets. More details about each module can be found under the links below. - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> +include::modules_list.asciidoc[] --- - -include::modules/apache.asciidoc[] -include::modules/docker.asciidoc[] -include::modules/haproxy.asciidoc[] -include::modules/mongodb.asciidoc[] -include::modules/mysql.asciidoc[] -include::modules/nginx.asciidoc[] -include::modules/postgresql.asciidoc[] -include::modules/redis.asciidoc[] -include::modules/system.asciidoc[] -include::modules/zookeeper.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc index bf916b5..e743c67 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc @@ -3,18 +3,18 @@ This file is generated! See scripts/docs_collector.py //// [[metricbeat-module-docker]] -== docker Module +== Docker Module experimental[] -This is the docker Module. +This module fetches metrics from https://www.docker.com/[Docker] containers. 
[float] === Example Configuration -The docker module supports the standard configuration options that are described +The Docker module supports the standard configuration options that are described in <>. Here is an example configuration: [source,yaml] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc new file mode 100644 index 0000000..3229b25 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc @@ -0,0 +1,30 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + + * <> + * <> + * <> + * <> + * <> + * <> + * <> + * <> + * <> + * <> + * <> + + +-- + +include::modules/apache.asciidoc[] +include::modules/docker.asciidoc[] +include::modules/haproxy.asciidoc[] +include::modules/kafka.asciidoc[] +include::modules/mongodb.asciidoc[] +include::modules/mysql.asciidoc[] +include::modules/nginx.asciidoc[] +include::modules/postgresql.asciidoc[] +include::modules/redis.asciidoc[] +include::modules/system.asciidoc[] +include::modules/zookeeper.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration.asciidoc index af2bdc7..4e47584 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration.asciidoc @@ -10,7 +10,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes * <> * <> -* <> * <> * <> * <> @@ -20,5 +19,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes * <> * <> * <> +* <> include::configuration/metricbeat-options.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration/metricbeat-options.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration/metricbeat-options.asciidoc index 0a4f06b..a44607d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration/metricbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/reference/configuration/metricbeat-options.asciidoc @@ -62,15 +62,17 @@ A list of tags that will be sent with the metricset event. This setting is optio ===== filters -A list of filters to apply to the data generated by the module. For more detail on how to configure -filters, see <>. +deprecated[5.1,This option will be renamed and changed in a future release] -include::../../../../libbeat/docs/generalconfig.asciidoc[] +A list of filters to apply to the data generated by the module. 
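The deprecated `filters` setting and the processors-based example earlier in this patch (dropping `beat.name` and `beat.hostname`) both come down to the same operation: removing named keys from an event before it is published. As a purely illustrative sketch — this is not libbeat's processor implementation — the effect of a `drop_fields` processor on a single event can be shown in Go:

[source,go]
----
package main

import "fmt"

// dropFields removes the named (dotted) keys from an event map. Illustrative
// only: libbeat's real drop_fields processor also understands nested maps and
// optional conditions.
func dropFields(event map[string]interface{}, fields ...string) {
	for _, f := range fields {
		delete(event, f)
	}
}

func main() {
	event := map[string]interface{}{
		"@timestamp":       "2017-01-14T08:03:44.000Z",
		"type":             "metricsets",
		"metricset.module": "redis",
		"beat.name":        "host-01",
		"beat.hostname":    "host-01",
	}

	dropFields(event, "beat.name", "beat.hostname")
	fmt.Println(event) // the beat.* keys are gone, everything else is untouched
}
----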
-include::../../../../libbeat/docs/processors-config.asciidoc[] +include::../../../../libbeat/docs/generalconfig.asciidoc[] include::../../../../libbeat/docs/outputconfig.asciidoc[] include::../../../../libbeat/docs/shared-path-config.asciidoc[] include::../../../../libbeat/docs/loggingconfig.asciidoc[] + +include::../../../../libbeat/docs/processors-config.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc index 07dfbaf..ec0507b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/version.asciidoc @@ -1,2 +1,3 @@ :stack-version: 5.1.1 :doc-branch: 5.1 +:go-version: 1.7.1 diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml b/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml index 40bdf7c..e791cb4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.full.yml @@ -67,7 +67,7 @@ metricbeat.modules: # Password of hosts. Empty by default #password: test123 -#------------------------------- docker Module ------------------------------- +#------------------------------- Docker Module ------------------------------- #- module: docker #metricsets: ["cpu", "info", "memory", "network", "diskio", "container"] #hosts: ["unix:///var/run/docker.sock"] diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json index cc7e9c9..5fb187c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json index dcb6fad..4e5c330 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc index 4ca0b02..1369b66 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc @@ -1,6 +1,6 @@ -== docker Module +== Docker Module experimental[] -This is the docker Module. +This module fetches metrics from https://www.docker.com/[Docker] containers. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/fields.yml index 868be12..4dabcc0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/fields.yml @@ -1,5 +1,5 @@ - key: docker - title: "docker" + title: "Docker" description: > experimental[] @@ -9,5 +9,5 @@ - name: docker type: group description: > - docker contains different informations and statistics of docker's containers running + Information and statistics about docker's running containers. 
fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/docs.asciidoc index a3eee73..3c095e4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/docs.asciidoc @@ -1,3 +1,4 @@ -=== docker container MetricSet +=== Docker Container Metricset -This is the container metricset of the module docker. +The Docker `container` metricset collects information and statistics about +running Docker containers. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/fields.yml index 6aaadc4..cf56b0a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/fields.yml @@ -1,16 +1,16 @@ - name: container type: group description: > - Docker container metrics + Docker container metrics. fields: - name: command type: keyword description: > - Executed command in docker container. + Command that was executed in the Docker container. - name: created type: date description: > - Date then the container was created. + Date when the container was created. - name: id type: keyword description: > @@ -39,4 +39,4 @@ - name: rw type: long description: > - Size of the files which have been created or changed since creation. + Size of the files that have been created or changed since creation. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/docs.asciidoc index d39b779..16ae506 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/docs.asciidoc @@ -1,3 +1,3 @@ -=== docker cpu MetricSet +=== Docker CPU Metricset -This is the cpu metricset of the module docker. +The Docker `cpu` metricset collects runtime CPU metrics. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml index 5d71f7d..3d7704a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml @@ -1,17 +1,17 @@ - name: cpu type: group description: > - Runtime cpu metrics. + Runtime CPU metrics. fields: - name: kernel.pct type: scaled_float format: percentage description: > - The system kernel consumed by The Docker server. + The system kernel consumed by the Docker server. - name: kernel.ticks type: long description: > - Cpu kernel tikcs + CPU kernel ticks. - name: system.pct type: scaled_float format: percentage @@ -19,7 +19,7 @@ - name: system.ticks type: long description: > - Cpu system tikcs + CPU system ticks. - name: user.pct type: scaled_float format: percentage @@ -27,12 +27,12 @@ - name: user.ticks type: long description: > - Cpu user tikcs + CPU user ticks - name: total.pct type: scaled_float format: percentage description: > - Total cpu usage. + Total CPU usage. # TODO: how to document cpu list? 
#- name: core # type: list diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/docs.asciidoc index fd866e9..3bd99f6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/docs.asciidoc @@ -1,3 +1,3 @@ -=== docker diskio MetricSet +=== Docker Diskio Metricset -This is the diskio metricset of the module docker. +The Docker `diskio` metricset collects disk I/O metrics. \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml index e5f51ad..540735d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml @@ -1,7 +1,7 @@ - name: diskio type: group description: > - Diskio metrics. + Disk I/O metrics. fields: - name: reads type: scaled_float @@ -14,4 +14,4 @@ - name: total type: scaled_float description: > - Reads and writes numbers combined. + Number of reads and writes combined. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go index 9a3106f..67437ee 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go @@ -64,17 +64,35 @@ func FetchStats(client *docker.Client) ([]Stat, error) { return nil, err } + var wg sync.WaitGroup + containersList := []Stat{} + queue := make(chan Stat, 1) + wg.Add(len(containers)) + for _, container := range containers { - // This is currently very inefficient as docker calculates the average for each request, - // means each request will take at least 2s: https://github.com/docker/docker/blob/master/cli/command/container/stats_helpers.go#L148 - // Getting all stats at once is implemented here: https://github.com/docker/docker/pull/25361 - containersList = append(containersList, exportContainerStats(client, &container)) + go func(container docker.APIContainers) { + queue <- exportContainerStats(client, &container) + }(container) } + go func() { + for container := range queue { + containersList = append(containersList, container) + wg.Done() + } + }() + + wg.Wait() + return containersList, err } +// exportContainerStats loads stats for the given container +// +// This is currently very inefficient as docker calculates the average for each request, +// means each request will take at least 2s: https://github.com/docker/docker/blob/master/cli/command/container/stats_helpers.go#L148 +// Getting all stats at once is implemented here: https://github.com/docker/docker/pull/25361 func exportContainerStats(client *docker.Client, container *docker.APIContainers) Stat { var wg sync.WaitGroup var event Stat diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/docs.asciidoc index e81f04e..fc4b8cb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/docs.asciidoc @@ -1,3 +1,4 @@ -=== docker info MetricSet +=== Docker Info Metricset -This is the info metricset of the 
module docker. +The Docker `info` metricset collects system-wide information based on the +https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information[Docker Remote API]. \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/fields.yml index 51fc8c7..76dfcb4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/info/_meta/fields.yml @@ -3,7 +3,7 @@ description: > experimental[] - info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information + Info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information. fields: - name: containers type: group @@ -29,7 +29,7 @@ - name: id type: keyword description: > - Unique docker host identifier. + Unique Docker host identifier. - name: images type: long diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/docs.asciidoc index f880881..0ee70f9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/docs.asciidoc @@ -1,3 +1,3 @@ -=== docker memory MetricSet +=== Docker Memory Metricset -This is the memory metricset of the module docker. +The Docker `memory` metricset collects memory metrics. \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/fields.yml index a26b859..be8b2a8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/_meta/fields.yml @@ -16,7 +16,7 @@ - name: rss type: group description: > - Rss memory stats. + RSS memory stats. fields: - name: total type: long diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/docs.asciidoc index 6b5e778..78bb5ab 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/docs.asciidoc @@ -1,3 +1,3 @@ -=== docker network MetricSet +=== Docker Network Metricset -This is the network metricset of the module docker. +The Docker `network` metricset collects network metrics. \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml index 98a6687..e443e34 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml @@ -1,7 +1,7 @@ - name: network type: group description: > - Netowrk metrics. + Network metrics. 
fields: - name: interface diff --git a/vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py b/vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py index 544148f..9fb1699 100644 --- a/vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py +++ b/vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py @@ -16,6 +16,8 @@ def collect(beat_name): """ + modules_list = {} + # Iterate over all modules for module in sorted(os.listdir(base_dir)): @@ -41,6 +43,8 @@ def collect(beat_name): fields = yaml.load(f.read()) title = fields[0]["title"] + modules_list[module] = title + config_file = beat_path + "/config.yml" # Add example config file @@ -131,6 +135,19 @@ def collect(beat_name): with open(os.path.abspath("docs") + "/modules/" + module + ".asciidoc", 'w') as f: f.write(module_file) + module_list_output = generated_note + for m, title in sorted(modules_list.iteritems()): + module_list_output += " * <>\n" + + module_list_output += "\n\n--\n\n" + for m, title in sorted(modules_list.iteritems()): + module_list_output += "include::modules/"+ m + ".asciidoc[]\n" + + # Write module link list + with open(os.path.abspath("docs") + "/modules_list.asciidoc", 'w') as f: + f.write(module_list_output) + + if __name__ == "__main__": parser = argparse.ArgumentParser( description="Collects modules docs") diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/dashboard/Packetbeat-Cassandra.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/dashboard/Packetbeat-Cassandra.json index c8f7908..1b718de 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/dashboard/Packetbeat-Cassandra.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/dashboard/Packetbeat-Cassandra.json @@ -1,13 +1,13 @@ { - "hits": 0, - "timeRestore": false, - "description": "", - "title": "Packetbeat Cassandra", - "uiStateJSON": "{\"P-10\":{\"vis\":{\"legendOpen\":false}},\"P-17\":{\"vis\":{\"legendOpen\":false}},\"P-18\":{\"vis\":{\"legendOpen\":false}}}", - "panelsJSON": 
"[{\"col\":10,\"id\":\"Cassandra:-ResponseKeyspace\",\"panelIndex\":3,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra:-ResponseType\",\"panelIndex\":4,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-ResponseTime\",\"panelIndex\":9,\"row\":5,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra:-RequestCount\",\"panelIndex\":10,\"row\":1,\"size_x\":9,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra:-Ops\",\"panelIndex\":11,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-RequestCountStackByType\",\"panelIndex\":15,\"row\":7,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-ResponseCountStackByType\",\"panelIndex\":16,\"row\":9,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra:-RequestCountByType\",\"panelIndex\":17,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra:-ResponseCountByType\",\"panelIndex\":18,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Navigation\",\"panelIndex\":19,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Cassandra:-QueryView\",\"type\":\"search\",\"panelIndex\":20,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":14,\"columns\":[\"cassandra.request.query\",\"cassandra.response.result.rows.meta.keyspace\",\"cassandra.response.result.rows.meta.table\",\"cassandra.response.result.rows.num_rows\"],\"sort\":[\"@timestamp\",\"desc\"]}]", - "optionsJSON": "{\"darkTheme\":false}", - "version": 1, + "hits": 0, + "timeRestore": false, + "description": "", + "title": "Packetbeat Cassandra", + "uiStateJSON": "{\"P-10\":{\"vis\":{\"legendOpen\":false}},\"P-17\":{\"vis\":{\"legendOpen\":false}},\"P-18\":{\"vis\":{\"legendOpen\":false}}}", + "panelsJSON": 
"[{\"col\":10,\"id\":\"Cassandra-ResponseKeyspace\",\"panelIndex\":3,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseType\",\"panelIndex\":4,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseTime\",\"panelIndex\":9,\"row\":5,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-RequestCount\",\"panelIndex\":10,\"row\":1,\"size_x\":9,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Cassandra-Ops\",\"panelIndex\":11,\"row\":3,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountStackByType\",\"panelIndex\":15,\"row\":7,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-ResponseCountStackByType\",\"panelIndex\":16,\"row\":9,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Cassandra-RequestCountByType\",\"panelIndex\":17,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cassandra-ResponseCountByType\",\"panelIndex\":18,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Navigation\",\"panelIndex\":19,\"row\":1,\"size_x\":3,\"size_y\":4,\"type\":\"visualization\"},{\"id\":\"Cassandra-QueryView\",\"type\":\"search\",\"panelIndex\":20,\"size_x\":12,\"size_y\":3,\"col\":1,\"row\":14,\"columns\":[\"cassandra.request.query\",\"cassandra.response.result.rows.meta.keyspace\",\"cassandra.response.result.rows.meta.table\",\"cassandra.response.result.rows.num_rows\"],\"sort\":[\"@timestamp\",\"desc\"]}]", + "optionsJSON": "{\"darkTheme\":false}", + "version": 1, "kibanaSavedObjectMeta": { "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}" } -} \ No newline at end of file +} diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc index af01daf..d8d75e1 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc @@ -8,7 +8,7 @@ requests and their response codes are reported: [source, yaml] ----------------------------------------------------- -filters: +processors: - include_fields: fields: - bytes_in diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration.asciidoc index 73f6bd0..8abf934 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration.asciidoc @@ -13,7 +13,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes * <> * <> * <> -* <> * <> * <> * <> @@ -24,6 +23,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes * <> * <> * <> +* <> NOTE: Packetbeat maintains a real-time topology map of all the servers in your network. See <> for more details. 
diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration/packetbeat-options.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration/packetbeat-options.asciidoc index e62dd3d..75bf100 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration/packetbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/reference/configuration/packetbeat-options.asciidoc @@ -794,8 +794,6 @@ process' command line as read from `/proc//cmdline`. include::../../../../libbeat/docs/generalconfig.asciidoc[] -include::../../../../libbeat/docs/processors-config.asciidoc[] - include::../../../../libbeat/docs/outputconfig.asciidoc[] include::../../../../libbeat/docs/shared-path-config.asciidoc[] @@ -804,3 +802,6 @@ include::../../../../libbeat/docs/loggingconfig.asciidoc[] include::./runconfig.asciidoc[] +include::../../../../libbeat/docs/processors-config.asciidoc[] + + diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc index 07dfbaf..ec0507b 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/version.asciidoc @@ -1,2 +1,3 @@ :stack-version: 5.1.1 :doc-branch: 5.1 +:go-version: 1.7.1 diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json index 02baf69..42b344a 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json index 7ff8407..543d0ca 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/testing/environments/base.yml b/vendor/github.com/elastic/beats/testing/environments/base.yml index adf9866..84e594b 100644 --- a/vendor/github.com/elastic/beats/testing/environments/base.yml +++ b/vendor/github.com/elastic/beats/testing/environments/base.yml @@ -8,17 +8,14 @@ services: - logstash environment: - LS_HOST=logstash - container_name: beat elasticsearch: image: elasticsearch:latest - container_name: elasticsearch logstash: image: logstash:latest links: - elasticsearch - container_name: logstash environment: - ES_HOST=elasticsearch @@ -28,4 +25,3 @@ services: - elasticsearch environment: - ELASTICSEARCH_URL=http://elasticsearch:9200/ - container_name: kibana diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot index 58389f7..5443b62 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot +++ b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile-snapshot @@ -2,7 +2,7 @@ FROM docker.elastic.co/kibana/kibana-ubuntu-base:latest MAINTAINER Elastic Docker Team -ARG 
KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.1.1-acecfcb6/downloads/kibana/kibana-5.1.0-linux-x86_64.tar.gz +ARG KIBANA_DOWNLOAD_URL=https://staging.elastic.co/5.1.2-429c1ec3/downloads/kibana/kibana-5.1.2-linux-x86_64.tar.gz ARG X_PACK_URL EXPOSE 5601 diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-snapshot b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-snapshot index b5ac225..54170f3 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-snapshot +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile-snapshot @@ -2,8 +2,8 @@ FROM java:8-jre ENV LS_VERSION 5 -ENV VERSION 5.1.1 -ENV URL https://staging.elastic.co/5.1.1-acecfcb6/downloads/logstash/logstash-${VERSION}.tar.gz +ENV VERSION 5.1.2 +ENV URL https://staging.elastic.co/5.1.2-429c1ec3/downloads/logstash/logstash-5.1.2.tar.gz ENV PATH $PATH:/opt/logstash-$VERSION/bin # Cache variable can be set during building to invalidate the build cache with `--build-arg CACHE=$(date +%s) .` diff --git a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml index 5fe6a11..695c5ac 100644 --- a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml +++ b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml @@ -7,8 +7,8 @@ services: context: ./docker/elasticsearch dockerfile: Dockerfile-snapshot args: - ELASTIC_VERSION: 5.1.1 - ES_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.1-acecfcb6/downloads/elasticsearch' + ELASTIC_VERSION: 5.1.2 + ES_DOWNLOAD_URL: 'https://staging.elastic.co/5.1.2-429c1ec3/downloads/elasticsearch' #XPACK: http://snapshots.elastic.co/downloads/packs/x-pack/x-pack-6.0.0-alpha1-SNAPSHOT.zip environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration.asciidoc index 8b8dbc5..8794d8c 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration.asciidoc @@ -11,7 +11,6 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes * <> * <> -* <> * <> * <> * <> @@ -21,6 +20,7 @@ configuration settings, you need to restart {beatname_uc} to pick up the changes * <> * <> * <> +* <> include::configuration/winlogbeat-options.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration/winlogbeat-options.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration/winlogbeat-options.asciidoc index 20e8c74..d680ea3 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration/winlogbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/reference/configuration/winlogbeat-options.asciidoc @@ -163,6 +163,38 @@ winlogbeat.event_logs: event_id: 4624, 4625, 4700-4800, -4735 -------------------------------------------------------------------------------- +[WARNING] +======================================= +If you specify more that 22 event IDs to include or 22 event IDs to exclude, +Windows will prevent Winlogbeat from reading the event log because it limits the +number of conditions that can be used in an event log query. 
If this occurs a similar +warning as shown below will be logged by Winlogbeat, and it will continue +processing data from other event logs. For more information, see +https://support.microsoft.com/en-us/kb/970453. + +`WARN EventLog[Application] Open() error. No events will be read from this +source. The specified query is invalid.` + +If you have more than 22 event IDs, you can workaround this Windows limitation +by using a drop_event[drop-event] processor to do the filtering after +Winlogbeat has received the events from Windows. The filter shown below is +equivalent to `event_id: 903, 1024, 4624` but can be expanded beyond 22 +event IDs. + +[source,yaml] +-------------------------------------------------------------------------------- +processors: +- drop_event.when.and: + - equals.log_name: Security + - not.or: + - equals.event_id: 903 + - equals.event_id: 1024 + - equals.event_id: 4624 +-------------------------------------------------------------------------------- + +======================================= + + ===== event_logs.level A list of event levels to include. The value is a comma-separated list of @@ -319,10 +351,11 @@ The metrics are served as a JSON document. The metrics include: include::../../../../libbeat/docs/generalconfig.asciidoc[] -include::../../../../libbeat/docs/processors-config.asciidoc[] - include::../../../../libbeat/docs/outputconfig.asciidoc[] include::../../../../libbeat/docs/shared-path-config.asciidoc[] include::../../../../libbeat/docs/loggingconfig.asciidoc[] + +include::../../../../libbeat/docs/processors-config.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc index 07dfbaf..ec0507b 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/version.asciidoc @@ -1,2 +1,3 @@ :stack-version: 5.1.1 :doc-branch: 5.1 +:go-version: 1.7.1 diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/bench_test.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/bench_test.go new file mode 100644 index 0000000..8aca84d --- /dev/null +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/bench_test.go @@ -0,0 +1,137 @@ +// +build windows + +package eventlog + +import ( + "flag" + "math/rand" + "os/exec" + "strconv" + "testing" + "time" + + elog "github.com/andrewkroh/sys/windows/svc/eventlog" + "github.com/dustin/go-humanize" +) + +// Benchmark tests with customized output. (`go test -v -benchtime 10s -benchtest .`) + +var ( + benchTest = flag.Bool("benchtest", false, "Run benchmarks for the eventlog package") + injectAmount = flag.Int("inject", 50000, "Number of events to inject before running benchmarks") +) + +// TestBenchmarkBatchReadSize tests the performance of different +// batch_read_size values. +func TestBenchmarkBatchReadSize(t *testing.T) { + if !*benchTest { + t.Skip("-benchtest not enabled") + } + + log, err := initLog(providerName, sourceName, eventCreateMsgFile) + if err != nil { + t.Fatal(err) + } + defer func() { + err := uninstallLog(providerName, sourceName, log) + if err != nil { + t.Fatal(err) + } + }() + + // Increase the log size so that it can hold these large events. 
+ output, err := exec.Command("wevtutil.exe", "sl", "/ms:1073741824", providerName).CombinedOutput() + if err != nil { + t.Fatal(err, string(output)) + } + + // Publish test messages: + for i := 0; i < *injectAmount; i++ { + err = log.Report(elog.Info, uint32(rng.Int63()%1000), []string{strconv.Itoa(i) + " " + randString(256)}) + if err != nil { + t.Fatal("ReportEvent error", err) + } + } + + setup := func(t testing.TB, batchReadSize int) (EventLog, func()) { + eventlog, err := newWinEventLog(map[string]interface{}{"name": providerName, "batch_read_size": batchReadSize}) + if err != nil { + t.Fatal(err) + } + err = eventlog.Open(0) + if err != nil { + t.Fatal(err) + } + return eventlog, func() { + err := eventlog.Close() + if err != nil { + t.Fatal(err) + } + } + } + + benchTest := func(batchSize int) { + var err error + result := testing.Benchmark(func(b *testing.B) { + eventlog, tearDown := setup(b, batchSize) + defer tearDown() + b.ResetTimer() + + // Each iteration reads one batch. + for i := 0; i < b.N; i++ { + _, err = eventlog.Read() + if err != nil { + return + } + } + }) + + if err != nil { + t.Fatal(err) + return + } + + t.Logf("batch_size=%v, total_events=%v, batch_time=%v, events_per_sec=%v, bytes_alloced_per_event=%v, total_allocs=%v", + batchSize, + result.N*batchSize, + time.Duration(result.NsPerOp()), + float64(batchSize)/time.Duration(result.NsPerOp()).Seconds(), + humanize.Bytes(result.MemBytes/(uint64(result.N)*uint64(batchSize))), + result.MemAllocs) + } + + benchTest(10) + benchTest(100) + benchTest(500) + benchTest(1000) +} + +// Utility Functions + +var rng = rand.NewSource(time.Now().UnixNano()) + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<<letterIdxBits - 1 // all 1-bits, as many as letterIdxBits + letterIdxMax = 63 / letterIdxBits // number of letter indices fitting in 63 bits +) + +// randString returns a random string of length n drawn from letterBytes. +func randString(n int) string { + b := make([]byte, n) + for i, cache, remain := n-1, rng.Int63(), letterIdxMax; i >= 0; { + if remain == 0 { + cache, remain = rng.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + + return string(b) +} diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go index c888a04..2839cfb 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "strconv" + "syscall" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" @@ -23,9 +24,14 @@ var ( detailf = logp.MakeDebug(detailSelector) ) -// dropReasons contains counters for the number of dropped events for each -// reason. -var dropReasons = expvar.NewMap("drop_reasons") +var ( + // dropReasons contains counters for the number of dropped events for each + // reason. + dropReasons = expvar.NewMap("drop_reasons") + + // readErrors contains counters for the read error types that occur. + readErrors = expvar.NewMap("read_errors") +) // EventLog is an interface to a Windows Event Log. type EventLog interface { @@ -177,3 +183,17 @@ func isZero(i interface{}) bool { } return false } + +// incrementMetric increments a value in the specified expvar.Map. The key +// should be a windows syscall.Errno or a string. Any other types will be +// reported under the "other" key.
+func incrementMetric(v *expvar.Map, key interface{}) { + switch t := key.(type) { + default: + v.Add("other", 1) + case string: + v.Add(t, 1) + case syscall.Errno: + v.Add(strconv.Itoa(int(t)), 1) + } +} diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go index e220c7c..ba77d70 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go @@ -195,6 +195,7 @@ func (l *eventLogging) Close() error { // by attempting to correct the error through closing and reopening the event // log. func (l *eventLogging) readRetryErrorHandler(err error) error { + incrementMetric(readErrors, err) if errno, ok := err.(syscall.Errno); ok { var reopen bool diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go index 5e2c6d9..98be3c2 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go @@ -4,6 +4,8 @@ package eventlog import ( "fmt" + "os/exec" + "strconv" "strings" "sync" "testing" @@ -35,6 +37,8 @@ const ( const allLevels = elog.Success | elog.AuditFailure | elog.AuditSuccess | elog.Error | elog.Info | elog.Warning +const gigabyte = 1 << 30 + // Test messages. var messages = map[uint32]struct { eventType uint16 @@ -72,7 +76,7 @@ var oneTimeLogpInit sync.Once func configureLogp() { oneTimeLogpInit.Do(func() { if testing.Verbose() { - logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"eventlog", "eventlog_detail"}) + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"eventlog"}) logp.Info("DEBUG enabled for eventlog.") } else { logp.LogInit(logp.LOG_WARNING, "", false, true, []string{}) @@ -143,6 +147,14 @@ func uninstallLog(provider, source string, log *elog.Log) error { return errs.Err() } +// setLogSize set the maximum number of bytes that an event log can hold. +func setLogSize(t testing.TB, provider string, sizeBytes int) { + output, err := exec.Command("wevtutil.exe", "sl", "/ms:"+strconv.Itoa(sizeBytes), providerName).CombinedOutput() + if err != nil { + t.Fatal("failed to set log size", err, string(output)) + } +} + // Verify that all messages are read from the event log. func TestRead(t *testing.T) { diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go index 7340e2b..7caab61 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go @@ -4,7 +4,6 @@ package eventlog import ( "fmt" - "strconv" "syscall" "time" @@ -13,6 +12,7 @@ import ( "github.com/elastic/beats/winlogbeat/sys" win "github.com/elastic/beats/winlogbeat/sys/wineventlog" "github.com/joeshaw/multierror" + "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -73,6 +73,7 @@ type winEventLog struct { channelName string // Name of the channel from which to read. subscription win.EvtHandle // Handle to the subscription. maxRead int // Maximum number returned in one Read. + lastRead uint64 // Record number of the last read event. render func(event win.EvtHandle) (string, error) // Function for rendering the event to XML. renderBuf []byte // Buffer used for rendering event. 
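The new `read_errors` map and the `incrementMetric` helper above use the standard `expvar` pattern: counters keyed by an errno (or a plain string) that surface in the metrics JSON the beat serves over HTTP. A self-contained sketch of that pattern — not Winlogbeat's actual wiring, and with an arbitrary listen address chosen for the example — might look like this:

[source,go]
----
package main

import (
	"expvar"
	"log"
	"net/http"
	"strconv"
	"syscall"
)

var readErrors = expvar.NewMap("read_errors")

// increment mirrors the incrementMetric helper above: errnos become numeric
// keys, strings are used as-is, anything else is counted under "other".
func increment(v *expvar.Map, key interface{}) {
	switch t := key.(type) {
	case string:
		v.Add(t, 1)
	case syscall.Errno:
		v.Add(strconv.Itoa(int(t)), 1)
	default:
		v.Add("other", 1)
	}
}

func main() {
	increment(readErrors, syscall.Errno(1734)) // RPC_S_INVALID_BOUND
	increment(readErrors, "timeout")

	// The expvar package registers /debug/vars on the default mux, so this
	// listener now serves {"read_errors": {"1734": 1, "timeout": 1}, ...}.
	log.Fatal(http.ListenAndServe("127.0.0.1:8123", nil))
}
----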
@@ -118,13 +119,8 @@ func (l *winEventLog) Open(recordNumber uint64) error { } func (l *winEventLog) Read() ([]Record, error) { - handles, err := win.EventHandles(l.subscription, l.maxRead) - if err == win.ERROR_NO_MORE_ITEMS { - detailf("%s No more events", l.logPrefix) - return nil, nil - } - if err != nil { - logp.Warn("%s EventHandles returned error %v", l.logPrefix, err) + handles, _, err := l.eventHandles(l.maxRead) + if err != nil || len(handles) == 0 { return nil, err } defer func() { @@ -145,17 +141,18 @@ func (l *winEventLog) Read() ([]Record, error) { } if err != nil && x == "" { logp.Err("%s Dropping event with rendering error. %v", l.logPrefix, err) - reportDrop(err) + incrementMetric(dropReasons, err) continue } r, err := l.buildRecordFromXML(x, err) if err != nil { logp.Err("%s Dropping event. %v", l.logPrefix, err) - reportDrop("unmarshal") + incrementMetric(dropReasons, err) continue } records = append(records, r) + l.lastRead = r.RecordID } debugf("%s Read() is returning %d records", l.logPrefix, len(records)) @@ -167,6 +164,34 @@ func (l *winEventLog) Close() error { return win.Close(l.subscription) } +func (l *winEventLog) eventHandles(maxRead int) ([]win.EvtHandle, int, error) { + handles, err := win.EventHandles(l.subscription, maxRead) + switch err { + case nil: + if l.maxRead > maxRead { + debugf("%s Recovered from RPC_S_INVALID_BOUND error (errno 1734) "+ + "by decreasing batch_read_size to %v", l.logPrefix, maxRead) + } + return handles, maxRead, nil + case win.ERROR_NO_MORE_ITEMS: + detailf("%s No more events", l.logPrefix) + return nil, maxRead, nil + case win.RPC_S_INVALID_BOUND: + incrementMetric(readErrors, err) + if err := l.Close(); err != nil { + return nil, 0, errors.Wrap(err, "failed to recover from RPC_S_INVALID_BOUND") + } + if err := l.Open(l.lastRead); err != nil { + return nil, 0, errors.Wrap(err, "failed to recover from RPC_S_INVALID_BOUND") + } + return l.eventHandles(maxRead / 2) + default: + incrementMetric(readErrors, err) + logp.Warn("%s EventHandles returned error %v", l.logPrefix, err) + return nil, 0, err + } +} + func (l *winEventLog) buildRecordFromXML(x string, recoveredErr error) (Record, error) { e, err := sys.UnmarshalEventXML([]byte(x)) if err != nil { @@ -204,20 +229,6 @@ func (l *winEventLog) buildRecordFromXML(x string, recoveredErr error) (Record, return r, nil } -// reportDrop reports a dropped event log record and the reason as an expvar -// metric. The reason should be a windows syscall.Errno or a string. Any other -// types will be reported under the "other" key. -func reportDrop(reason interface{}) { - switch t := reason.(type) { - default: - dropReasons.Add("other", 1) - case string: - dropReasons.Add(t, 1) - case syscall.Errno: - dropReasons.Add(strconv.Itoa(int(t)), 1) - } -} - // newWinEventLog creates and returns a new EventLog for reading event logs // using the Windows Event Log. func newWinEventLog(options map[string]interface{}) (EventLog, error) { @@ -283,7 +294,7 @@ func newWinEventLog(options map[string]interface{}) (EventLog, error) { } func init() { - // Register eventlogging API if it is available. + // Register wineventlog API if it is available. 
available, _ := win.IsAvailable() if available { Register(winEventLogAPIName, 0, newWinEventLog, win.Channels) diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog_test.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog_test.go index af74a1b..6889647 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog_test.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog_test.go @@ -3,8 +3,11 @@ package eventlog import ( + "expvar" + "strconv" "testing" + elog "github.com/andrewkroh/sys/windows/svc/eventlog" "github.com/stretchr/testify/assert" ) @@ -52,3 +55,69 @@ func TestWinEventLogBatchReadSize(t *testing.T) { assert.Len(t, records, batchReadSize) } + +// TestReadLargeBatchSize tests reading from an event log using a large +// read_batch_size parameter. When combined with large messages this causes +// EvtNext (wineventlog.EventRecords) to fail with RPC_S_INVALID_BOUND error. +func TestReadLargeBatchSize(t *testing.T) { + configureLogp() + log, err := initLog(providerName, sourceName, eventCreateMsgFile) + if err != nil { + t.Fatal(err) + } + defer func() { + err := uninstallLog(providerName, sourceName, log) + if err != nil { + t.Fatal(err) + } + }() + + setLogSize(t, providerName, gigabyte) + + // Publish large test messages. + totalEvents := 1000 + for i := 0; i < totalEvents; i++ { + err = log.Report(elog.Info, uint32(i%1000), []string{strconv.Itoa(i) + " " + randString(31800)}) + if err != nil { + t.Fatal("ReportEvent error", err) + } + } + + eventlog, err := newWinEventLog(map[string]interface{}{"name": providerName, "batch_read_size": 1024}) + if err != nil { + t.Fatal(err) + } + err = eventlog.Open(0) + if err != nil { + t.Fatal(err) + } + defer func() { + err := eventlog.Close() + if err != nil { + t.Fatal(err) + } + }() + + var eventCount int + for eventCount < totalEvents { + records, err := eventlog.Read() + if err != nil { + t.Fatal("read error", err) + } + if len(records) == 0 { + t.Fatal("read returned 0 records") + } + eventCount += len(records) + } + + t.Logf("number of records returned: %v", eventCount) + + wineventlog := eventlog.(*winEventLog) + assert.Equal(t, 1024, wineventlog.maxRead) + + expvar.Do(func(kv expvar.KeyValue) { + if kv.Key == "read_errors" { + t.Log(kv) + } + }) +} diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/syscall_windows.go b/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/syscall_windows.go index 3ee3f73..a02d62e 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/syscall_windows.go +++ b/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/syscall_windows.go @@ -13,6 +13,7 @@ const ( ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 ERROR_NO_MORE_ITEMS syscall.Errno = 259 ERROR_NONE_MAPPED syscall.Errno = 1332 + RPC_S_INVALID_BOUND syscall.Errno = 1734 ERROR_INVALID_OPERATION syscall.Errno = 4317 ERROR_EVT_MESSAGE_NOT_FOUND syscall.Errno = 15027 ERROR_EVT_MESSAGE_ID_NOT_FOUND syscall.Errno = 15028 diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go b/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go index f4f3845..138c0da 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go +++ b/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go @@ -125,6 +125,10 @@ func Subscribe( // handles available to return. 
Close must be called on each returned EvtHandle // when finished with the handle. func EventHandles(subscription EvtHandle, maxHandles int) ([]EvtHandle, error) { + if maxHandles < 1 { + return nil, fmt.Errorf("maxHandles must be greater than 0") + } + eventHandles := make([]EvtHandle, maxHandles) var numRead uint32 diff --git a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json index 56b6ea1..c3e7ebc 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json +++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template-es2x.json @@ -7,7 +7,7 @@ } }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ { diff --git a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json index 5491615..a3ac73b 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json +++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.template.json @@ -5,7 +5,7 @@ "norms": false }, "_meta": { - "version": "5.1.1" + "version": "5.1.2" }, "dynamic_templates": [ {
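Taken together, the new `RPC_S_INVALID_BOUND` constant, the `lastRead` bookkeeping, and the recursive `eventHandles` call implement a simple recovery strategy: when the provider rejects a batch as too large, close and reopen the subscription at the last delivered record and retry with half the batch size, with the `maxHandles < 1` guard in `EventHandles` stopping the halving from running to zero. Stripped of the Windows API calls, and purely as an illustration of that strategy (the names below are invented for the sketch), the retry loop looks roughly like this:

[source,go]
----
package main

import (
	"errors"
	"fmt"
)

var errInvalidBound = errors.New("RPC_S_INVALID_BOUND")

// read stands in for EvtNext: it rejects any request for more than 300
// events at once, the way a large batch_read_size combined with large
// events can trigger RPC_S_INVALID_BOUND.
func read(batchSize int) ([]int, error) {
	if batchSize > 300 {
		return nil, errInvalidBound
	}
	return make([]int, batchSize), nil
}

// readWithRecovery mirrors the shape of winEventLog.eventHandles: on an
// "invalid bound" error it retries with half the batch size (the real code
// also closes and reopens the subscription at lastRead before retrying).
func readWithRecovery(batchSize int) ([]int, int, error) {
	events, err := read(batchSize)
	switch {
	case err == nil:
		return events, batchSize, nil
	case err == errInvalidBound && batchSize > 1:
		fmt.Printf("batch of %d failed, retrying with %d\n", batchSize, batchSize/2)
		return readWithRecovery(batchSize / 2)
	default:
		return nil, 0, err
	}
}

func main() {
	events, size, err := readWithRecovery(1024)
	fmt.Printf("read %d events with batch size %d (err=%v)\n", len(events), size, err)
}
----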