diff --git a/.env b/.env new file mode 100644 index 0000000..dceddc3 --- /dev/null +++ b/.env @@ -0,0 +1,22 @@ +ELASTIC_VERSION=8.1.3 + +## Passwords for stack users +# + +# User 'elastic' (built-in) +# +# Superuser role, full access to cluster management and data indices. +# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html +ELASTIC_PASSWORD='elasticheart' + +# User 'logstash_internal' (custom) +# +# The user Logstash uses to connect and send data to Elasticsearch. +# https://www.elastic.co/guide/en/logstash/current/ls-security.html +LOGSTASH_INTERNAL_PASSWORD='stashthemanylogs' + +# User 'kibana_system' (built-in) +# +# The user Kibana uses to connect and communicate with Elasticsearch. +# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html +KIBANA_SYSTEM_PASSWORD='kibanarama' diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..271def0 --- /dev/null +++ b/Makefile @@ -0,0 +1,15 @@ +DOCKER_REGISTRY := placeholder.github.com +DOCKER_IMAGE_TAG := latest +DOCKER_IMAGE_PATH := tattle-tale:$(DOCKER_IMAGE_TAG) +DOCKER_SAVE_FILE := tattle-tale-$(DOCKER_IMAGE_TAG) + +docker-build: + docker build -t $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_PATH) . + +docker-push: + docker login $(DOCKER_REGISTRY) + docker push $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_PATH) + +docker-save: + docker save $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_PATH) | gzip > $(DOCKER_SAVE_FILE).gz + diff --git a/README.md b/README.md index e567018..3de652d 100644 --- a/README.md +++ b/README.md @@ -22,9 +22,9 @@ Copy the files from the `filebeat` directory and put them into `/etc/filebeat` Copy the file `delete_old_indices.sh` from the `cron.daily` directory to `/etc/cron.daily` and make the file executable (`chmod 755 /etc/cron.daily/delete_old_indices.sh`) -Create the `/opt/tattle_tale` directory `sudo mkdir /opt/tattle_tale` +Create the `/opt/tattle-tale` directory `sudo mkdir /opt/tattle-tale` -Copy the `tattle_shadow.py`, `tattle_snmp_poll.py` and `tattle_tale_cfg.py` files to the `/opt/tattle_tale` directory and make `tattle_shadow.py` and `tattle_snmp_poll.py` executable (`chmod 755 `) +Copy the `tattle_shadow.py`, `tattle_snmp_poll.py` and `tattle_tale_cfg.py` files to the `/opt/tattle-tale/bin` directory and make `tattle_shadow.py` and `tattle_snmp_poll.py` executable (`chmod 755 `) Rename the `netflow.yml.disabled` file to `netflow.yml` in `/etc/filebeat/modules.d` Enable the filebeat module `sudo filebeat modules enable netflow` @@ -42,7 +42,7 @@ Edit the `tattle_tale_cfg.py` file and populate these fields: `snmp_community = ""` -Create the file `/opt/tattle_tale/router_list.txt` and put in the IPs of routers that will be polled (one router per line). +Create the file `/opt/tattle-tale/lib/router-list.txt` (the default for `TT_ROUTER_LIST_FILE` in `tattle_tale_cfg.py`) and put in the IPs of routers that will be polled (one router per line). Restart the ELK stack daemons: diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..3275704 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,129 @@ +version: '3.7' + +networks: + tattle-tale: + driver: bridge + +volumes: + setup: + elasticsearch: + +services: + setup: + # The 'setup' service runs a one-off script which initializes the + # 'logstash_internal' and 'kibana_system' users inside Elasticsearch with the + # values of the passwords defined in the '.env' file. + # + # This task is only performed during the *initial* startup of the stack.
On all + # subsequent runs, the service simply returns immediately, without performing + # any modification to existing users. + # + # See https://github.com/deviantony/docker-elk#setting-up-user-authentication + build: + context: setup/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + init: true + volumes: + - setup:/state:Z + environment: + ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-} + LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-} + KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-} + networks: + - tattle-tale + + elasticsearch: + build: + context: elasticsearch/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + volumes: + - type: volume + source: elasticsearch + target: /usr/share/elasticsearch/data + environment: + ES_JAVA_OPTS: -Xmx256m -Xms256m + # Bootstrap password. + # Used to initialize the keystore during the initial startup of + # Elasticsearch. Ignored on subsequent runs. + ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-} + # Use single node discovery in order to disable production mode and avoid bootstrap checks. + # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html + discovery.type: single-node + networks: + - tattle-tale + + logstash: + build: + context: logstash/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + volumes: + - type: bind + target: /usr/share/logstash/pipeline + source: ./logstash/pipeline + read_only: true + - type: bind + # This directory must be mapped to TattleTale's /opt/tattle-tale/lib/logstash directory + # source: ${TT_HOST_LIB_DIRECTORY}/logstash + target: /usr/share/logstash/tattle-tale + source: ./lib/logstash + read_only: true + environment: + LS_JAVA_OPTS: -Xmx256m -Xms256m + LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-} + networks: + - tattle-tale + depends_on: + - elasticsearch + + kibana: + build: + context: kibana/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + ports: + # Replace 0.0.0.0 with the IP address you want the web interface to run on + - "0.0.0.0:5601:5601" + environment: + KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-} + networks: + - tattle-tale + depends_on: + - elasticsearch + + filebeat: + # See https://www.elastic.co/guide/en/beats/filebeat/8.2/running-on-docker.html#running-on-docker + build: + context: filebeat/ + args: + ELASTIC_VERSION: ${ELASTIC_VERSION} + ports: + # Replace 0.0.0.0 with the IP of the interface receiving netflow + - 0.0.0.0:2055:2055/udp + networks: + - tattle-tale + depends_on: + - logstash + + fletch: + # Fetches and flushes things + build: + context: fletch/ + environment: + TT_SHADOW_USER: BLAH + TT_SHADOW_PASS: BLAH + TT_SNMP_COMMUNITY_STRING: BLAH + # Change to match your naming convention + # BTW, https://regex101.com is really good for testing regex + TT_INT_DESCRIPTION_PEER_NAME_REGEX: '\[NAME=(.+?)\]' + volumes: + - type: bind + # This lib directory must also be mapped to logstash's /usr/share/logstash/tattle-tale + # source: ${TT_HOST_LIB_DIRECTORY} + source: ./lib + target: /opt/tattle-tale/lib + read_only: false + networks: + - tattle-tale diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile new file mode 100644 index 0000000..d824a0f --- /dev/null +++ b/elasticsearch/Dockerfile @@ -0,0 +1,9 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION} + +COPY elasticsearch.yml /usr/share/elasticsearch/config/elasticsearch.yml + +# Add your elasticsearch plugins setup here +# Example: RUN elasticsearch-plugin install analysis-icu diff --git 
a/elasticsearch/elasticsearch.yml b/elasticsearch/elasticsearch.yml index b70f225..3439c94 100644 --- a/elasticsearch/elasticsearch.yml +++ b/elasticsearch/elasticsearch.yml @@ -1,4 +1,18 @@ -path.data: /var/lib/elasticsearch -path.logs: /var/log/elasticsearch -indices.query.bool.max_clause_count: 8192 -search.max_buckets: 100000 +--- +## Default Elasticsearch configuration from Elasticsearch base image. +## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml +# +cluster.name: "docker-cluster" +network.host: 0.0.0.0 + +## X-Pack settings +## see https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html +# +xpack.license.self_generated.type: trial +xpack.security.enabled: true + +#xpack.license.self_generated.type: basic +#xpack.security.enabled: false +#xpack.security.transport.ssl.enabled: false +#xpack.security.http.ssl.enabled: false +#xpack.monitoring.collection.enabled: true diff --git a/filebeat/Dockerfile b/filebeat/Dockerfile new file mode 100644 index 0000000..4231536 --- /dev/null +++ b/filebeat/Dockerfile @@ -0,0 +1,12 @@ +ARG ELASTIC_VERSION + +FROM docker.elastic.co/beats/filebeat:${ELASTIC_VERSION} + +RUN mv modules.d/netflow.yml.disabled modules.d/netflow.yml +RUN filebeat modules enable netflow +RUN sed -i "s/enabled: false/enabled: true/g" modules.d/netflow.yml +RUN sed -i "s/netflow_host: localhost/netflow_host: 0.0.0.0/g" modules.d/netflow.yml + +COPY filebeat.yml ./ + +CMD filebeat -e -c ./filebeat.yml --path.home /usr/share/filebeat --path.config /usr/share/filebeat \ No newline at end of file diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index b388181..8e77a24 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -14,7 +14,7 @@ setup.template.settings: index.number_of_shards: 1 setup.kibana: output.logstash: - hosts: ["localhost:5044"] + hosts: ["logstash:5044"] processors: - add_host_metadata: when.not.contains.tags: forwarded diff --git a/fletch/Dockerfile b/fletch/Dockerfile new file mode 100644 index 0000000..b2d4a1d --- /dev/null +++ b/fletch/Dockerfile @@ -0,0 +1,36 @@ +# Use an official Python runtime as a parent image +FROM python:3.6-slim + +WORKDIR /opt/tattle-tale + +RUN mkdir -pv bin etc lib/logstash var/downloads var/tmp + +# Install/update system packages +RUN apt-get update ; apt-get -y install libsmi-dev gcc curl cron +RUN pip install elasticsearch-curator +COPY curator/curator.yml curator/delete_old_indices.yml etc/ + +# Install any needed packages specified in requirements.txt +COPY requirements.txt ./ +RUN pip3 install --trusted-host pypi.python.org -r requirements.txt + +# Install our bits & pieces +COPY *.py *.sh bin/ +RUN chmod +x bin/* + +# RUN curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.13.4-amd64.deb +# RUN dpkg -i filebeat-7.13.4-amd64.deb ; rm filebeat-7.13.4-amd64.deb + +# Setup cron to run the update scripts +COPY cron/cron.daily/* /etc/cron.daily/ +RUN chmod +x /etc/cron.daily/* + +COPY cron/cron.hourly/* /etc/cron.hourly/ +RUN chmod +x /etc/cron.hourly/* + +RUN mkdir /etc/cron.minutely + +COPY cron/crontab /etc/crontab +RUN crontab /etc/crontab + +CMD /opt/tattle-tale/bin/entrypoint.sh diff --git a/cron.daily/delete_old_indices.sh b/fletch/cron/cron.daily/delete_old_indices.sh similarity index 62% rename from cron.daily/delete_old_indices.sh rename to fletch/cron/cron.daily/delete_old_indices.sh index 7acb18e..6889d50 100644 --- a/cron.daily/delete_old_indices.sh +++ 
b/fletch/cron/cron.daily/delete_old_indices.sh @@ -3,4 +3,4 @@ # Script to delete elasticsearch netflow-* indices older than 45 days # Put this file in /etc/cron.daily/ and make sure it's executable (chmod 755 delete_old_indices.sh) -/bin/curator /etc/curator/delete_old_indices.yml --config /etc/curator/curator.yml +/usr/local/bin/curator /opt/tattle-tale/etc/delete_old_indices.yml --config /opt/tattle-tale/etc/curator.yml diff --git a/cron.daily/tattle_tale_shadow.sh b/fletch/cron/cron.daily/tattle_tale_shadow.sh similarity index 81% rename from cron.daily/tattle_tale_shadow.sh rename to fletch/cron/cron.daily/tattle_tale_shadow.sh index aa8d1ca..1f19500 100644 --- a/cron.daily/tattle_tale_shadow.sh +++ b/fletch/cron/cron.daily/tattle_tale_shadow.sh @@ -3,4 +3,4 @@ # Run the script to pull down the shadowserver files and then create dictionary files for each # The logstash filters then use this to filter out events -/opt/tattle_tale/tattle_shadow.py +/opt/tattle-tale/bin/tattle_shadow.py diff --git a/fletch/cron/cron.hourly/tattle-tale-status.sh b/fletch/cron/cron.hourly/tattle-tale-status.sh new file mode 100644 index 0000000..2754f91 --- /dev/null +++ b/fletch/cron/cron.hourly/tattle-tale-status.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +datestring=$(date) +echo "TattleTale status at $datestring: RUNNING" diff --git a/fletch/cron/crontab b/fletch/cron/crontab new file mode 100644 index 0000000..0161a79 --- /dev/null +++ b/fletch/cron/crontab @@ -0,0 +1,16 @@ +SHELL=/bin/sh +PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin + +# Example of job definition: +# .---------------- minute (0 - 59) +# | .------------- hour (0 - 23) +# | | .---------- day of month (1 - 31) +# | | | .------- month (1 - 12) OR jan,feb,mar,apr ... +# | | | | .---- day of week (0 - 6) (Sunday=0 or 7) OR sun,mon,tue,wed,thu,fri,sat +# | | | | | +# * * * * * command to be executed +25 * * * * /etc/cron.hourly/tattle-tale-status.sh > /proc/1/fd/1 2>/proc/1/fd/2 +30 6 * * * /etc/cron.daily/delete_old_indices.sh > /proc/1/fd/1 2>/proc/1/fd/2 +35 6 * * * /etc/cron.daily/tattle_tale_shadow.sh > /proc/1/fd/1 2>/proc/1/fd/2 +# 45 6 * * 7 /etc/cron.weekly/blah.sh > /proc/1/fd/1 2>/proc/1/fd/2 +# 50 6 1 * * /etc/cron.monthly/blah.sh > /proc/1/fd/1 2>/proc/1/fd/2 diff --git a/curator/curator.yml b/fletch/curator/curator.yml similarity index 93% rename from curator/curator.yml rename to fletch/curator/curator.yml index 3745ffa..4814a40 100644 --- a/curator/curator.yml +++ b/fletch/curator/curator.yml @@ -1,6 +1,6 @@ client: hosts: - - 127.0.0.1 + - elasticsearch port: 9200 url_prefix: use_ssl: False diff --git a/curator/delete_old_indices.yml b/fletch/curator/delete_old_indices.yml similarity index 100% rename from curator/delete_old_indices.yml rename to fletch/curator/delete_old_indices.yml diff --git a/fletch/entrypoint.sh b/fletch/entrypoint.sh new file mode 100644 index 0000000..b50e8ce --- /dev/null +++ b/fletch/entrypoint.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -eu + +echo "TattleTale status at $(date): STARTING..." + +echo "Fetching current ShadowServer report..." +/opt/tattle-tale/bin/tattle_shadow.py +echo "Completed processing ShadowServer report (return code $?)." + +# TODO: Fetch IP report from DIS +echo "Cron schedule:" +crontab -l + +echo "TattleTale status at $(date): STARTED" +echo "Starting cron..."
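+# Keep cron in the foreground (-f) so it is the container's long-running process; -L 8 logs each job's process number.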
+cron -f -L 8 + +echo "exited $0" \ No newline at end of file diff --git a/fletch/requirements.txt b/fletch/requirements.txt new file mode 100644 index 0000000..07f8cba --- /dev/null +++ b/fletch/requirements.txt @@ -0,0 +1,26 @@ +# +# Output generated via "pipdeptree -f > requirements.txt" +# +# Example setup using python3 venv: +# +# python3 -m venv ./venv +# source venv/bin/activate +# pip install -r requirements.txt +# + +pipdeptree==2.0.0 + pip==21.1.3 +requests==2.26.0 + certifi==2021.5.30 + charset-normalizer==2.0.3 + idna==3.2 + urllib3==1.26.6 +snimpy==1.0.0 + cffi==1.14.6 + pycparser==2.20 + pysnmp==4.4.12 + pyasn1==0.4.8 + pycryptodomex==3.10.1 + pysmi==0.3.4 + ply==3.11 + setuptools==44.0.0 diff --git a/tattle_shadow.py b/fletch/tattle_shadow.py similarity index 97% rename from tattle_shadow.py rename to fletch/tattle_shadow.py index a9058fc..abf1527 100644 --- a/tattle_shadow.py +++ b/fletch/tattle_shadow.py @@ -130,6 +130,9 @@ def copy_files(source_dir, dest_dir): session = requests.Session() credentials = {'user': cfg.shadow_user, 'password': cfg.shadow_pass, 'login':'Login'} response = session.post(cfg.shadow_url, data=credentials) +print(f"Got response downloading ShadowServer report: {response.text} (status code {response.status_code})") +if response.status_code != 200: + sys.exit(1) yester_day_month = find_yesterday() urls_files = find_links(yester_day_month[0], yester_day_month[1], response.text) diff --git a/tattle_snmp_poll.py b/fletch/tattle_snmp_poll.py similarity index 100% rename from tattle_snmp_poll.py rename to fletch/tattle_snmp_poll.py diff --git a/fletch/tattle_tale_cfg.py b/fletch/tattle_tale_cfg.py new file mode 100644 index 0000000..faf32ef --- /dev/null +++ b/fletch/tattle_tale_cfg.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 + +import os + +# Username for Shadowserver authentication +shadow_user = os.getenv("TT_SHADOW_USER") + +# Password for Shadowserver authentication +shadow_pass = os.getenv("TT_SHADOW_PASS") + +# URL for Shadowserver download list +shadow_url = os.getenv("TT_SHADOW_REPORT_URL", default="https://dl.shadowserver.org/reports/index.php") + +# Directory to store Shadowserver downloads +# Must include the trailing / +shadow_dir = os.getenv("TT_SHADOW_REPORT_DOWNLOAD_DIR", default="/opt/tattle-tale/var/downloads/") + +# Directory to store new dictionaries +# Must include the trailing / +shadow_temp_dir = os.getenv("TT_SHADOW_REPORT_TEMP_DIR", default="/opt/tattle-tale/var/tmp/") + +# Dictionary location for Logstash +# Must include the trailing / +logstash_dict_dir = os.getenv("TT_SHADOW_REPORT_DICT_DIR", default="/opt/tattle-tale/lib/logstash/") + +# File containing list of IP addresses to query via SNMP +# TODO: Potentially support run-time modification? 
+device_list = os.getenv("TT_ROUTER_LIST_FILE", default="/opt/tattle-tale/lib/router-list.txt") + +# SNMPv2c community string +snmp_community = os.getenv("TT_SNMP_COMMUNITY_STRING") + +# Regular Expression to pull Peer name from interface description +if_regex = os.getenv("TT_INT_DESCRIPTION_PEER_NAME_REGEX") + diff --git a/kibana/Dockerfile b/kibana/Dockerfile new file mode 100644 index 0000000..ec5ff52 --- /dev/null +++ b/kibana/Dockerfile @@ -0,0 +1,9 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/kibana/kibana:${ELASTIC_VERSION} + +COPY kibana.yml /usr/share/kibana/config/ + +# Add your kibana plugins setup here +# Example: RUN kibana-plugin install diff --git a/kibana/kibana.yml b/kibana/kibana.yml index 2677162..07ab33e 100644 --- a/kibana/kibana.yml +++ b/kibana/kibana.yml @@ -1,2 +1,13 @@ -server.host: "localhost" -server.name: "" +--- +## Default Kibana configuration from Kibana base image. +## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.ts +# +server.name: kibana +server.host: 0.0.0.0 +elasticsearch.hosts: [ "http://elasticsearch:9200" ] +monitoring.ui.container.elasticsearch.enabled: true + +## X-Pack security credentials +# +elasticsearch.username: kibana_system +elasticsearch.password: ${KIBANA_SYSTEM_PASSWORD} diff --git a/lib/logstash/ard.yml b/lib/logstash/ard.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/chargen.yml b/lib/logstash/chargen.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/coap.yml b/lib/logstash/coap.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/db2.yml b/lib/logstash/db2.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/dns.yml b/lib/logstash/dns.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/ifName.yml b/lib/logstash/ifName.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/ldap.yml b/lib/logstash/ldap.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/mdns.yml b/lib/logstash/mdns.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/memcached.yml b/lib/logstash/memcached.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/mssql.yml b/lib/logstash/mssql.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/netbios.yml b/lib/logstash/netbios.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/ntp.yml b/lib/logstash/ntp.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/ntpmonitor.yml b/lib/logstash/ntpmonitor.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/portmapper.yml b/lib/logstash/portmapper.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/qotd.yml b/lib/logstash/qotd.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/rdpeudp.yml b/lib/logstash/rdpeudp.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/snmp.yml b/lib/logstash/snmp.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/ssdp.yml b/lib/logstash/ssdp.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/tftp.yml b/lib/logstash/tftp.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/ubiquiti.yml b/lib/logstash/ubiquiti.yml new file mode 100644 index 0000000..e69de29 diff --git a/lib/logstash/xdmcp.yml b/lib/logstash/xdmcp.yml new file mode 
100644 index 0000000..e69de29 diff --git a/lib/router-list.txt b/lib/router-list.txt new file mode 100644 index 0000000..e69de29 diff --git a/logstash/Dockerfile b/logstash/Dockerfile new file mode 100644 index 0000000..ecc0ff1 --- /dev/null +++ b/logstash/Dockerfile @@ -0,0 +1,11 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/logstash/logstash:${ELASTIC_VERSION} + +COPY logstash.yml /usr/share/logstash/config/ +COPY pipelines.yml /usr/share/logstash/config/ +RUN mkdir -p /usr/share/logstash/tattle-tale/ + +# Add your logstash plugins setup here +# Example: RUN logstash-plugin install logstash-filter-json diff --git a/logstash/logstash.yml b/logstash/logstash.yml new file mode 100644 index 0000000..47722ea --- /dev/null +++ b/logstash/logstash.yml @@ -0,0 +1,5 @@ +--- +## Default Logstash configuration from Logstash base image. +## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml +# +http.host: "0.0.0.0" diff --git a/logstash/conf.d/10-input.conf b/logstash/pipeline/10-input.conf similarity index 76% rename from logstash/conf.d/10-input.conf rename to logstash/pipeline/10-input.conf index 93e9423..89a9b36 100644 --- a/logstash/conf.d/10-input.conf +++ b/logstash/pipeline/10-input.conf @@ -2,7 +2,7 @@ input { beats { - host => "localhost" + host => "0.0.0.0" port => 5044 } } diff --git a/logstash/conf.d/20-filter.conf b/logstash/pipeline/20-filter.conf similarity index 100% rename from logstash/conf.d/20-filter.conf rename to logstash/pipeline/20-filter.conf diff --git a/logstash/conf.d/30-shadowserver.conf b/logstash/pipeline/30-shadowserver.conf similarity index 70% rename from logstash/conf.d/30-shadowserver.conf rename to logstash/pipeline/30-shadowserver.conf index 2b2759a..ce96f4f 100644 --- a/logstash/conf.d/30-shadowserver.conf +++ b/logstash/pipeline/30-shadowserver.conf @@ -4,7 +4,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/qotd.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/qotd.yml' } } @@ -12,7 +12,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/chargen.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/chargen.yml' } } @@ -20,7 +20,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/dns.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/dns.yml' } } @@ -28,7 +28,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/tftp.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/tftp.yml' } } @@ -36,7 +36,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/portmapper.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/portmapper.yml' } } @@ -44,7 +44,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/ntp.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/ntp.yml' } } @@ -52,7 +52,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/ntpmonitor.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/ntpmonitor.yml' } } @@ -60,7 +60,7 
@@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/netbios.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/netbios.yml' } } @@ -68,7 +68,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/snmp.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/snmp.yml' } } @@ -76,7 +76,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/xdmcp.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/xdmcp.yml' } } @@ -84,7 +84,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/ldap.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/ldap.yml' } } @@ -92,7 +92,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/db2.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/db2.yml' } } @@ -100,7 +100,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/mssql.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/mssql.yml' } } @@ -108,7 +108,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/ssdp.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/ssdp.yml' } } @@ -116,7 +116,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/ard.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/ard.yml' } } @@ -124,7 +124,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/rdpeudp.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/rdpeudp.yml' } } @@ -132,7 +132,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/mdns.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/mdns.yml' } } @@ -140,7 +140,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/coap.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/coap.yml' } } @@ -148,7 +148,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/ubiquiti.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/ubiquiti.yml' } } @@ -156,7 +156,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/memcached.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/memcached.yml' } } @@ -164,7 +164,7 @@ filter { translate { field => "[destination][ip]" destination => "[netflow][open_udp]" - dictionary_path => '/etc/logstash/dictionaries/ssdp.yml' + dictionary_path => '/usr/share/logstash/tattle-tale/ssdp.yml' } } diff --git a/logstash/conf.d/30-src-80-443.conf.disabled b/logstash/pipeline/30-src-80-443.conf.disabled similarity index 100% rename from logstash/conf.d/30-src-80-443.conf.disabled rename to logstash/pipeline/30-src-80-443.conf.disabled diff --git 
a/logstash/conf.d/40-ifName.conf b/logstash/pipeline/40-ifName.conf similarity index 88% rename from logstash/conf.d/40-ifName.conf rename to logstash/pipeline/40-ifName.conf index fe7bdcb..d68cf69 100644 --- a/logstash/conf.d/40-ifName.conf +++ b/logstash/pipeline/40-ifName.conf @@ -7,7 +7,7 @@ filter { } translate { id => "netflow_postproc_translate_input_ifname" - dictionary_path => "/etc/logstash/dictionaries/ifName.yml" + dictionary_path => "/usr/share/logstash/tattle-tale/ifName.yml" field => "[@metadata][in_if_key]" destination => "[netflow][input_ifname]" fallback => "index: %{[netflow][ingress_interface]}" diff --git a/logstash/conf.d/50-reverse-dns.conf b/logstash/pipeline/50-reverse-dns.conf similarity index 100% rename from logstash/conf.d/50-reverse-dns.conf rename to logstash/pipeline/50-reverse-dns.conf diff --git a/logstash/conf.d/60-service-type.conf b/logstash/pipeline/60-service-type.conf similarity index 100% rename from logstash/conf.d/60-service-type.conf rename to logstash/pipeline/60-service-type.conf diff --git a/logstash/conf.d/81-filter-scanners.conf b/logstash/pipeline/81-filter-scanners.conf similarity index 100% rename from logstash/conf.d/81-filter-scanners.conf rename to logstash/pipeline/81-filter-scanners.conf diff --git a/logstash/conf.d/99-output.conf b/logstash/pipeline/99-output.conf similarity index 58% rename from logstash/conf.d/99-output.conf rename to logstash/pipeline/99-output.conf index bfb88c4..6cf0648 100644 --- a/logstash/conf.d/99-output.conf +++ b/logstash/pipeline/99-output.conf @@ -2,10 +2,10 @@ output { elasticsearch { - hosts => ["http://localhost:9200"] + hosts => ["http://elasticsearch:9200"] #index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" index => "netflow-%{+YYYY.MM.dd}" - #user => "elastic" - #password => "changeme" + user => "logstash_internal" + password => "${LOGSTASH_INTERNAL_PASSWORD}" } } diff --git a/logstash/pipelines.yml b/logstash/pipelines.yml new file mode 100644 index 0000000..3a5c3af --- /dev/null +++ b/logstash/pipelines.yml @@ -0,0 +1,6 @@ +# This file is where you define your pipelines. You can define multiple. +# For more information on multiple pipelines, see the documentation: +# https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html + +- pipeline.id: main + path.config: "/usr/share/logstash/pipeline/*.conf" diff --git a/setup/Dockerfile b/setup/Dockerfile new file mode 100644 index 0000000..0b2ef90 --- /dev/null +++ b/setup/Dockerfile @@ -0,0 +1,17 @@ +ARG ELASTIC_VERSION + +# https://www.docker.elastic.co/ +FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION} + +USER root + +COPY . 
/ + +RUN set -eux; \ + mkdir /state; \ + chown elasticsearch /state; \ + chmod +x /entrypoint.sh + +USER elasticsearch:root + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/setup/entrypoint.sh b/setup/entrypoint.sh new file mode 100755 index 0000000..269bb4f --- /dev/null +++ b/setup/entrypoint.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + +source "$(dirname "${BASH_SOURCE[0]}")/helpers.sh" + + +# -------------------------------------------------------- +# Users declarations + +declare -A users_passwords +users_passwords=( + [logstash_internal]="${LOGSTASH_INTERNAL_PASSWORD:-}" + [kibana_system]="${KIBANA_SYSTEM_PASSWORD:-}" +) + +declare -A users_roles +users_roles=( + [logstash_internal]='logstash_writer' +) + +# -------------------------------------------------------- +# Roles declarations + +declare -A roles_files +roles_files=( + [logstash_writer]='logstash_writer.json' +) + +# -------------------------------------------------------- + + +echo "-------- $(date) --------" + +state_file="$(dirname ${BASH_SOURCE[0]})/state/.done" +if [[ -e "$state_file" ]]; then + log "State file exists at '${state_file}', skipping setup" + exit 0 +fi + +log 'Waiting for availability of Elasticsearch' +wait_for_elasticsearch +sublog 'Elasticsearch is running' + +for role in "${!roles_files[@]}"; do + log "Role '$role'" + + declare body_file + body_file="$(dirname "${BASH_SOURCE[0]}")/roles/${roles_files[$role]:-}" + if [[ ! -f "${body_file:-}" ]]; then + sublog "No role body found at '${body_file}', skipping" + continue + fi + + sublog 'Creating/updating' + ensure_role "$role" "$(<"${body_file}")" +done + +for user in "${!users_passwords[@]}"; do + log "User '$user'" + if [[ -z "${users_passwords[$user]:-}" ]]; then + sublog 'No password defined, skipping' + continue + fi + + declare -i user_exists=0 + user_exists="$(check_user_exists "$user")" + + if ((user_exists)); then + sublog 'User exists, setting password' + set_user_password "$user" "${users_passwords[$user]}" + else + if [[ -z "${users_roles[$user]:-}" ]]; then + err ' No role defined, skipping creation' + continue + fi + + sublog 'User does not exist, creating' + create_user "$user" "${users_passwords[$user]}" "${users_roles[$user]}" + fi +done + +mkdir -p "$(dirname "${state_file}")" +touch "$state_file" diff --git a/setup/helpers.sh b/setup/helpers.sh new file mode 100644 index 0000000..2457372 --- /dev/null +++ b/setup/helpers.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash + +# Log a message. +function log { + echo "[+] $1" +} + +# Log a message at a sub-level. +function sublog { + echo " ⠿ $1" +} + +# Log an error. +function err { + echo "[x] $1" >&2 +} + +# Poll the 'elasticsearch' service until it responds with HTTP code 200. +function wait_for_elasticsearch { + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' "http://${elasticsearch_host}:9200/" ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local output + + # retry for max 300s (60*5s) + for _ in $(seq 1 60); do + output="$(curl "${args[@]}" || true)" + if [[ "${output: -3}" -eq 200 ]]; then + result=0 + break + fi + + sleep 5 + done + + if ((result)); then + echo -e "\n${output::-3}" + fi + + return $result +} + +# Verify that the given Elasticsearch user exists. 
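+# Prints 1 if the user exists, 0 if it does not; any other HTTP response is treated as an error.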
+function check_user_exists { + local username=$1 + + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' + "http://${elasticsearch_host}:9200/_security/user/${username}" + ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local -i exists=0 + local output + + output="$(curl "${args[@]}")" + if [[ "${output: -3}" -eq 200 || "${output: -3}" -eq 404 ]]; then + result=0 + fi + if [[ "${output: -3}" -eq 200 ]]; then + exists=1 + fi + + if ((result)); then + echo -e "\n${output::-3}" + else + echo "$exists" + fi + + return $result +} + +# Set password of a given Elasticsearch user. +function set_user_password { + local username=$1 + local password=$2 + + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' + "http://${elasticsearch_host}:9200/_security/user/${username}/_password" + '-X' 'POST' + '-H' 'Content-Type: application/json' + '-d' "{\"password\" : \"${password}\"}" + ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local output + + output="$(curl "${args[@]}")" + if [[ "${output: -3}" -eq 200 ]]; then + result=0 + fi + + if ((result)); then + echo -e "\n${output::-3}\n" + fi + + return $result +} + +# Create the given Elasticsearch user. +function create_user { + local username=$1 + local password=$2 + local role=$3 + + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' + "http://${elasticsearch_host}:9200/_security/user/${username}" + '-X' 'POST' + '-H' 'Content-Type: application/json' + '-d' "{\"password\":\"${password}\",\"roles\":[\"${role}\"]}" + ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local output + + output="$(curl "${args[@]}")" + if [[ "${output: -3}" -eq 200 ]]; then + result=0 + fi + + if ((result)); then + echo -e "\n${output::-3}\n" + fi + + return $result +} + +# Ensure that the given Elasticsearch role is up-to-date, create it if required. 
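+# (Elasticsearch upserts on POST to the _security/role endpoint, so one call covers both create and update.)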
+function ensure_role { + local name=$1 + local body=$2 + + local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}" + + local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' + "http://${elasticsearch_host}:9200/_security/role/${name}" + '-X' 'POST' + '-H' 'Content-Type: application/json' + '-d' "$body" + ) + + if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then + args+=( '-u' "elastic:${ELASTIC_PASSWORD}" ) + fi + + local -i result=1 + local output + + output="$(curl "${args[@]}")" + if [[ "${output: -3}" -eq 200 ]]; then + result=0 + fi + + if ((result)); then + echo -e "\n${output::-3}\n" + fi + + return $result +} diff --git a/setup/roles/logstash_writer.json b/setup/roles/logstash_writer.json new file mode 100644 index 0000000..9501665 --- /dev/null +++ b/setup/roles/logstash_writer.json @@ -0,0 +1,34 @@ +{ + "cluster": [ + "manage_index_templates", + "monitor", + "manage_ilm" + ], + "indices": [ + { + "names": [ + "logs-generic-default", + "logstash-*", + "ecs-logstash-*", + "netflow-*" + ], + "privileges": [ + "write", + "create", + "create_index", + "manage", + "manage_ilm" + ] + }, + { + "names": [ + "logstash", + "ecs-logstash" + ], + "privileges": [ + "write", + "manage" + ] + } + ] +} diff --git a/tattle_tale_cfg.py b/tattle_tale_cfg.py deleted file mode 100644 index 6a27b52..0000000 --- a/tattle_tale_cfg.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 - -# Username for Shadowserver authentication -shadow_user = "" - -# Password for Shadowserver authentication -shadow_pass = "" - -# URL for Shadowserver download list -shadow_url = "https://dl.shadowserver.org/reports/index.php" - -# Directory to store Shadowserver downloads -# Must include the trailing / -shadow_dir = "/opt/tattle_tale/downloads/" - -# Directory to store new dictionaries -# Must include the trailing / -shadow_temp_dir = "/opt/tattle_tale/dicts/" - -# Dictionary location for Logstash -# Must include the trailing / -logstash_dict_dir = "/etc/logstash/dictionaries/" - -# File containing list of IP addresses to query via SNMP -device_list = "/opt/tattle_tale/router_list.txt" - -# SNMPv2c community string -snmp_community = "" - -# Regular Expression to pull Peer name from interface description -# Change to match your naming convention -# BTW, https://regex101.com is really good for testing regex -if_regex = "\[NAME=(.+?)\]" - diff --git a/tt-attack-report-ideas.txt b/tt-attack-report-ideas.txt new file mode 100644 index 0000000..da76ff5 --- /dev/null +++ b/tt-attack-report-ideas.txt @@ -0,0 +1,186 @@ +When TT identifies a "malicious sender" (an IP targeting a known reflector/amp): + +TT can identify: + Src IP/port (may be forged) coming from an external IP + Dest IP/port + +Current upload logic: If IP A is seen connecting to more than X discrete IPs +(with a known-suspect dest port/protocol), then it's considered a potentially malicious +true-positive attack + +Counters set back to zero.
(keyed off of source IP) + +Source IP: Unique count + +Add "first observation" + "most recent observation" time: + +{ + "_index": "tattle_tale", + "_type": "_doc", + "_id": "Y7MiQXsBSPFZEKNFxyww", + "_version": 1, + "_score": null, + "_source": { + "@timestamp": "2021-08-13T20:09:02.000Z", + "flow_id": "FYUhQXsBg1TcSTBF4KEJ", + "src_autonomous_system": 3320, + "src_addr": "87.156.23.151", + "input_ifname": "SERVERHUB", + "dst_asn": 11351, + "dst_port": 53, + "uniq_dst_ip": 10, (count) + "reporter": "Misslie Command Test" + }, + "fields": { + "@timestamp": [ + "2021-08-13T20:09:02.000Z" + ] + }, + "sort": [ + 1628885342000 + ] +} + + +{ + "_index": "netflow-2021.08.13", + "_type": "_doc", + "_id": "F4QoQHsBg1TcSTBFxtI9", + "_version": 1, + "_score": null, + "_source": { + "fileset": { + "name": "log" + }, + "@timestamp": "2021-08-13T15:36:56.000Z", + "related": { + "ip": [ + "91.151.88.110", + "98.7.235.26" + ] + }, + "service": { + "type": "netflow" + }, + "@version": "1", + "observer": { +** "ip": "66.109.2.23" + }, + "destination": { + "port": 3283, + "locality": "external", + "hostname": "cpe-98-7-235-26.nyc.res.rr.com", + "ip": "98.7.235.26" + }, + "input": { + "type": "netflow" + }, + "netflow.dst_service_type": "residential", + "source": { + "bytes": 32, + "hostname": "91.151.88.110", + "packets": 1, + "ip": "91.151.88.110", + "port": 7777, + "locality": "external" + }, + "event": { + "type": [ + "connection" + ], + "created": "2021-08-13T15:36:58.756Z", + "module": "netflow", + "dataset": "netflow.log", + "kind": "event", + "category": [ + "network_traffic", + "network" + ], + "action": "netflow_flow" + }, + "flow": { + "id": "H0p-KzJ9Xmc", + "locality": "external" + }, + "network": { + "bytes": 32, + "transport": "udp", + "community_id": "1:+1KOHYUJfURGJIsePxTRD9RmPwU=", + "packets": 1, + "direction": "unknown", + "iana_number": 17 + }, + "netflow": { + "flow_end_milliseconds": "2021-08-13T15:35:56.994Z", + "ip_class_of_service": 0, + "bgp_source_as_number": 212219, + "flow_end_sys_up_time": 3343527042, + "destination_ipv4_prefix_length": 18, + "type": "netflow_flow", + "packet_delta_count": 1, + "destination_ipv4_address": "98.7.235.26", +** "ingress_interface": 979, + "source_ipv4_address": "91.151.88.110", + "open_udp": "ard", + "tcp_control_bits": 0, + "octet_delta_count": 32, + "flow_start_milliseconds": "2021-08-13T15:35:56.994Z", + "bgp_destination_as_number": 12271, + "flow_start_sys_up_time": 3343527042, + "icmp_type_code_ipv4": 0, + "ip_version": 4, + "egress_interface": 1316, + "input_ifname": "SERVERHUB", + "bgp_next_hop_ipv4_address": "66.109.2.246", + "ingress_interface_type": 1, + "source_transport_port": 7777, + "exporter": { + "uptime_millis": 0, + "source_id": 60, + "address": "66.109.2.23:33092", + "version": 10, + "timestamp": "2021-08-13T15:36:56.000Z" + }, + "source_ipv4_prefix_length": 24, + "destination_transport_port": 3283, + "ip_next_hop_ipv4_address": "66.109.5.120", + "protocol_identifier": 17 + }, + "ecs": { + "version": "1.9.0" + }, + "agent": { + "type": "filebeat", + "ephemeral_id": "d45604c6-26a1-4c61-9e11-c33193739caf", + "name": "netflow-storage.nsa.ctec.charterlab.com", + "hostname": "netflow-storage.nsa.ctec.charterlab.com", + "id": "46083867-69a3-434c-aa9f-93e782846006", + "version": "7.13.3" + }, + "tags": [ + "forwarded", + "beats_input_raw_event" + ] + }, + "fields": { + "netflow.flow_start_milliseconds": [ + "2021-08-13T15:35:56.994Z" + ], + "@timestamp": [ + "2021-08-13T15:36:56.000Z" + ], + "event.created": [ + "2021-08-13T15:36:58.756Z" 
+ ], + "netflow.exporter.timestamp": [ + "2021-08-13T15:36:56.000Z" + ], + "netflow.flow_end_milliseconds": [ + "2021-08-13T15:35:56.994Z" + ] + }, + "sort": [ + 1628869016000 + ] +} +
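
For reference, a minimal sketch (not part of the patch) of the upload logic described in tt-attack-report-ideas.txt above: count the discrete destination IPs each source hits on a known-suspect port, flag the source once it crosses a threshold, then reset its counter. THRESHOLD, flag_malicious_sender() and the flow dict shape are assumptions modeled on the two sample documents, not names that exist in this repo.

#!/usr/bin/env python3
# Illustrative only. Tracks, per source IP, the set of discrete destination
# IPs plus "first observation" / "most recent observation" times, and emits
# a report document shaped like the first sample above once the unique
# destination count exceeds THRESHOLD.
from collections import defaultdict
from datetime import datetime, timezone

THRESHOLD = 10  # the "X" in the notes above

dest_ips = defaultdict(set)  # source IP -> discrete destination IPs seen
first_seen = {}              # source IP -> first observation time
last_seen = {}               # source IP -> most recent observation time

def flag_malicious_sender(report):
    # Placeholder: a real implementation would index 'report' into the
    # 'tattle_tale' Elasticsearch index shown in the first sample document.
    print(report)

def process_flow(flow):
    """Handle one netflow event whose destination already matched a
    ShadowServer dictionary (i.e. [netflow][open_udp] was populated)."""
    src = flow["source"]["ip"]
    now = datetime.now(timezone.utc)

    first_seen.setdefault(src, now)
    last_seen[src] = now
    dest_ips[src].add(flow["destination"]["ip"])

    if len(dest_ips[src]) > THRESHOLD:
        flag_malicious_sender({
            "src_addr": src,
            "dst_port": flow["destination"]["port"],
            "uniq_dst_ip": len(dest_ips[src]),
            "first_observation": first_seen[src].isoformat(),
            "most_recent_observation": last_seen[src].isoformat(),
        })
        dest_ips[src].clear()  # counters set back to zero (keyed off of source IP)

Where this hook should live (tattle_snmp_poll.py, a Logstash filter, or a separate consumer) and whether the observation times survive restarts are questions the notes above leave open.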