diff --git a/.travis.yml b/.travis.yml
index 302077a76..f10db8075 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,6 +25,7 @@ addons:
 services:
   - docker
   - redis-server
+  - elasticsearch
 branches:
   only:
@@ -32,7 +33,8 @@ branches:
     - dev
 before_install:
-  - docker run -d --name influxdb -e INFLUXDB_DB=openwisp2 -p 8086:8086 influxdb:alpine
+  # - docker run -d --name influxdb -e INFLUXDB_DB=openwisp2 -p 8086:8086 influxdb:alpine
+  # - docker run -p 9200:9200 docker.elastic.co/elasticsearch/elasticsearch:7.8.0
   - pip install -U pip wheel setuptools
   - pip install $DJANGO
   - pip install -U -r requirements-test.txt
diff --git a/docker-compose.yml b/docker-compose.yml
index 6386355ed..e1be0de26 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -22,7 +22,28 @@ services:
       INFLUXDB_DB: openwisp2
       INFLUXDB_USER: openwisp
       INFLUXDB_USER_PASSWORD: openwisp
-
+  # a single-node elasticsearch cluster for development;
+  # production deployments will typically run multiple nodes
+  elasticsearch:
+    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
+    container_name: es01
+    environment:
+      - node.name=openwisp2
+      - cluster.name=openwisp2
+      - discovery.type=single-node
+      - bootstrap.memory_lock=true
+      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+    volumes:
+      - data01:/usr/share/elasticsearch/data
+    ports:
+      - 9200:9200
+    networks:
+      - elastic
   redis:
     image: redis:5.0-alpine
     ports:
@@ -31,3 +49,8 @@ services:
 volumes:
   influxdb-data: {}
+  data01: {}
+
+networks:
+  elastic:
+    driver: bridge
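+# to verify the node is up once the container is running, one can query the
+# cluster health API (illustrative quick check, assumes curl is available):
+#   curl http://localhost:9200/_cluster/health?pretty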
diff --git a/openwisp_monitoring/db/__init__.py b/openwisp_monitoring/db/__init__.py
index 063d2d8f7..6d8207fa6 100644
--- a/openwisp_monitoring/db/__init__.py
+++ b/openwisp_monitoring/db/__init__.py
@@ -1,7 +1,6 @@
 from .backends import timeseries_db

 chart_query = timeseries_db.queries.chart_query
-default_chart_query = timeseries_db.queries.default_chart_query
 device_data_query = timeseries_db.queries.device_data_query

-__all__ = ['timeseries_db', 'chart_query', 'default_chart_query', 'device_data_query']
+__all__ = ['timeseries_db', 'chart_query', 'device_data_query']
diff --git a/openwisp_monitoring/db/backends/__init__.py b/openwisp_monitoring/db/backends/__init__.py
index ae4847acd..f77cad727 100644
--- a/openwisp_monitoring/db/backends/__init__.py
+++ b/openwisp_monitoring/db/backends/__init__.py
@@ -48,7 +48,7 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None):
     except ImportError as e:
         # The database backend wasn't found. Display a helpful error message
         # listing all built-in database backends.
-        builtin_backends = ['influxdb']
+        builtin_backends = ['influxdb', 'elasticsearch']
         if backend_name not in [
             f'openwisp_monitoring.db.backends.{b}' for b in builtin_backends
         ]:
diff --git a/openwisp_monitoring/db/backends/elasticsearch/__init__.py b/openwisp_monitoring/db/backends/elasticsearch/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/openwisp_monitoring/db/backends/elasticsearch/client.py b/openwisp_monitoring/db/backends/elasticsearch/client.py
new file mode 100644
index 000000000..8b6ab5330
--- /dev/null
+++ b/openwisp_monitoring/db/backends/elasticsearch/client.py
@@ -0,0 +1,323 @@
+import json
+import logging
+from copy import deepcopy
+from datetime import datetime, timedelta
+
+from django.conf import settings
+from django.core.exceptions import ValidationError
+from django.utils.functional import cached_property
+from elasticsearch import Elasticsearch
+from elasticsearch.exceptions import ElasticsearchException, NotFoundError
+from elasticsearch_dsl import Search
+from elasticsearch_dsl.connections import connections
+from pytz import timezone as tz
+
+from openwisp_utils.utils import deep_merge_dicts
+
+from .. import TIMESERIES_DB
+from .index import MetricIndex, Point, find_metric
+from .queries import default_chart_query, math_map, operator_lookup
+
+logger = logging.getLogger(__name__)
+
+
+class DatabaseClient(object):
+    _AGGREGATE = [
+        'filters',
+        'children',
+        'parent',
+        'date_histogram',
+        'auto_date_histogram',
+        'date_range',
+        'geo_distance',
+        'geohash_grid',
+        'geotile_grid',
+        'global',
+        'geo_centroid',
+        'ip_range',
+        'missing',
+        'nested',
+        'range',
+        'reverse_nested',
+        'significant_terms',
+        'significant_text',
+        'sampler',
+        'terms',
+        'diversified_sampler',
+        'composite',
+        'top_hits',
+        'avg',
+        'weighted_avg',
+        'cardinality',
+        'extended_stats',
+        'geo_bounds',
+        'max',
+        'min',
+        'percentiles',
+        'percentile_ranks',
+        'scripted_metric',
+        'stats',
+        'sum',
+        'value_count',
+    ]
+    backend_name = 'elasticsearch'
+
+    def __init__(self, db_name='metric'):
+        self.db_name = db_name or TIMESERIES_DB['NAME']
+        self.client_error = ElasticsearchException
+
+    def create_database(self):
+        """ creates the connection to elasticsearch """
+        connections.create_connection(hosts=[TIMESERIES_DB['HOST']])
+        self.get_db  # accessing the cached property instantiates the client
+
+    def drop_database(self):
+        """ deletes all indices """
+        self.delete_metric_data()
+        self.get_db.close()
+        logger.debug('Deleted all indices from Elasticsearch')
+
+    @cached_property
+    def get_db(self):
+        """ returns an ``Elasticsearch`` client instance """
+        # TODO: authentication is not fully handled yet, see ``SecurityClient``
+        return Elasticsearch(
+            [f"{TIMESERIES_DB['HOST']}:{TIMESERIES_DB['PORT']}"],
+            http_auth=(TIMESERIES_DB['USER'], TIMESERIES_DB['PASSWORD']),
+            retry_on_timeout=True,
+        )
+
+    def create_or_alter_retention_policy(self, name, duration):
+        """ creates or alters existing retention policy if necessary """
+        # TODO: implement with index lifecycle management
+        pass
+
+    def query(self, query, precision=None):
+        index = query.pop('key')
+        # ``update_from_dict`` is used instead of the ``from_dict``
+        # classmethod so the index set above is preserved
+        return Search(index=index).update_from_dict(query).execute().to_dict()
+
+    def write(self, name, values, **kwargs):
+        # TODO: add support for retention policies
+        tags = kwargs.get('tags')
+        timestamp = kwargs.get('timestamp')
+        metric_id = find_metric(name, tags, add=True)
+        metric_index = MetricIndex.get(metric_id, index=name)
+        point = Point(fields=values, time=timestamp or datetime.now())
+        metric_index.points.append(point)
+        metric_index.save()
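+
+    # illustrative usage of ``write`` / ``read`` (assumes a reachable
+    # Elasticsearch node; ``pk`` is a hypothetical tag value):
+    #   timeseries_db.write('ping', {'reachable': 1}, tags={'object_id': pk})
+    #   timeseries_db.read(key='ping', fields='reachable', tags={'object_id': pk})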
+
+    def read(self, key, fields, tags, limit=1, order='-time', **kwargs):
+        extra_fields = kwargs.get('extra_fields')
+        time_format = kwargs.get('time_format')
+        # since = kwargs.get('since')
+        metric_id = find_metric(key, tags)
+        if not metric_id:
+            return []
+        try:
+            metric_index = MetricIndex.get(metric_id, index=key)
+        except NotFoundError:
+            return []
+        if order == 'time':
+            points = list(metric_index.points[0:limit])
+        elif order == '-time':
+            points = list(reversed(metric_index.points))[0:limit]
+        else:
+            raise self.client_error(
+                f'Invalid order "{order}" passed.\nYou may pass "time" / "-time" to get '
+                'results sorted in ascending / descending order respectively.'
+            )
+        if not points:
+            return []
+        # drop points which do not contain the requested field
+        # (e.g. distinguishes between traffic and clients measurements)
+        for point in list(points):
+            if fields not in point.fields.to_dict():
+                points.remove(point)
+        if extra_fields and extra_fields != '*':
+            assert isinstance(extra_fields, list)
+            _points = []
+            for point in points:
+                point = point.to_dict()
+                _point = {
+                    'time': self._format_time(point['time'], time_format),
+                    fields: point['fields'][fields],
+                }
+                for extra_field in extra_fields:
+                    if point['fields'].get(extra_field) is not None:
+                        _point.update({extra_field: point['fields'][extra_field]})
+                _points.append(_point)
+            points = _points
+        elif extra_fields == '*':
+            points = [
+                deep_merge_dicts(
+                    p.fields.to_dict(), {'time': self._format_time(p.time, time_format)}
+                )
+                for p in points
+            ]
+        else:
+            points = [
+                deep_merge_dicts(
+                    {fields: p.fields.to_dict()[fields]},
+                    {'time': self._format_time(p.time, time_format)},
+                )
+                for p in points
+            ]
+        # if since:
+        #     TODO: filter the points by the ``since`` timestamp
+        return points
+
+    def _format_time(self, obj, time_format=None):
+        """ returns the time in UTC, as an ISO formatted string or a unix timestamp """
+        if time_format == 'isoformat':
+            return obj.astimezone(tz=tz('UTC')).isoformat(timespec='seconds')
+        return int(obj.astimezone(tz=tz('UTC')).timestamp())
+
+    def get_list_query(self, query, precision='s'):
+        response = self.query(query, precision)
+        points = response['aggregations']['GroupByTime']['buckets']
+        list_points = self._fill_points(
+            query, [self._format(point) for point in points]
+        )
+        return list_points
+
+    def _fill_points(self, query, points):
+        _range = next(
+            (item for item in query['query']['bool']['must'] if item.get('range')), None
+        )
+        # at least two points are needed to compute the interval
+        if not _range or len(points) < 2:
+            return points
+        days = int(_range['range']['points.time']['from'][4:-3])
+        start_time = datetime.now()
+        end_time = start_time - timedelta(days=days)  # include today
+        dummy_point = deepcopy(points[0])
+        interval = points[0]['time'] - points[1]['time']
+        start_ts = points[0]['time'] + interval
+        end_ts = points[-1]['time'] - interval
+        for field in dummy_point.keys():
+            dummy_point[field] = None
+        # pad the head and tail of the series with null points
+        # so that the whole requested time range is covered
+        while start_ts < start_time.timestamp():
+            dummy_point['time'] = start_ts
+            points.insert(0, deepcopy(dummy_point))
+            start_ts += interval
+        # TODO: this needs to be fixed and shouldn't be required since intervals are set
+        while points[-1]['time'] < end_time.timestamp():
+            points.pop(-1)
+        while end_ts > end_time.timestamp():
+            dummy_point['time'] = end_ts
+            points.append(deepcopy(dummy_point))
+            end_ts -= interval
+        return points
+
+    def delete_metric_data(self, key=None, tags=None):
+        """
+        deletes the metric identified by the ``key`` and ``tags``
+        provided; if neither is given, all metrics are deleted
+        """
+        if key and tags:
+            metric_id = find_metric(key, tags)
+            self.get_db.delete(index=key, id=metric_id)
+        elif key:
+            self.get_db.indices.delete(index=key, ignore=[400, 404])
+        else:
+            self.get_db.indices.delete(index='*', ignore=[400, 404])
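+
+    # e.g. (hypothetical values):
+    #   timeseries_db.delete_metric_data('ping', tags={'object_id': pk})  # one metric
+    #   timeseries_db.delete_metric_data('ping')  # the whole ``ping`` index
+    #   timeseries_db.delete_metric_data()  # all indices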
+
+    # Chart related functions below
+
+    def validate_query(self, query):
+        if isinstance(query, str):
+            query = json.loads(query)
+        # Elasticsearch currently supports validation of the query section only;
+        # aggs, size, _source etc. are not supported
+        valid_check = self.get_db.indices.validate_query(body={'query': query['query']})
+        # show a helpful message on failure
+        if not valid_check['valid']:
+            raise ValidationError(valid_check['error'])
+        return self._is_aggregate(query)
+
+    def _is_aggregate(self, q):
+        agg_dict = q['aggs']['GroupByTime']['aggs'].values()
+        agg = []
+        for item in agg_dict:
+            agg.append(next(iter(item)))
+        return set(agg) <= set(self._AGGREGATE)
+
+    def get_query(
+        self,
+        chart_type,
+        params,
+        time,
+        group_map,
+        summary=False,
+        fields=None,
+        query=None,
+        timezone=settings.TIME_ZONE,
+    ):
+        query['key'] = params.pop('key')
+        query = json.dumps(query)
+        for k, v in params.items():
+            query = query.replace('{' + k + '}', v)
+        query = self._group_by(query, time, chart_type, group_map, strip=summary)
+        query = json.loads(query)
+        if summary:
+            _range = next(
+                (item for item in query['query']['bool']['must'] if item.get('range')),
+                None,
+            )
+            if _range:
+                query['query']['bool']['must'].remove(_range)
+        query['aggs']['GroupByTime']['date_histogram']['time_zone'] = timezone
+        return query
+
+    def _group_by(self, query, time, chart_type, group_map, strip=False):
+        if not self.validate_query(query):
+            return query
+        if not strip and chart_type != 'histogram':
+            value = group_map[time]
+            query = query.replace('1d/d', f'{time}/d')
+            query = query.replace('10m', value)
+        if strip:
+            query = query.replace('10m', time)
+        return query
+
+    # TODO: implement fetching of top fields
+    def _get_top_fields(
+        self,
+        query,
+        params,
+        chart_type,
+        group_map,
+        number,
+        time,
+        timezone=settings.TIME_ZONE,
+    ):
+        pass
+
+    def _format(self, point):
+        pt = {}
+        # convert time from milliseconds to seconds precision
+        pt['time'] = int(point['key'] / 1000)
+        for key, value in point.items():
+            if isinstance(value, dict):
+                pt[key] = self._transform_field(key, value['value'])
+        return pt
+
+    def _transform_field(self, field, value):
+        """ performs arithmetic operations on the field if required """
+        if value is None:
+            return value
+        if field in math_map:
+            op = operator_lookup.get(math_map[field]['operator'])
+            if op is not None:
+                value = op(value, math_map[field]['value'])
+        return value
+
+    def default_chart_query(self, tags):
+        q = deepcopy(default_chart_query)
+        if not tags:
+            # remove both the ``object_id`` and ``content_type`` match clauses
+            del q['query']['bool']['must'][0:2]
+        return q
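+
+# An illustrative sketch of how a chart query is built and executed
+# (``params`` must contain ``key``; ``group_map`` maps time ranges to
+# aggregation intervals; both come from the chart configuration):
+#   q = timeseries_db.default_chart_query(tags=True)
+#   q = timeseries_db.get_query('line', params, '1d', group_map, query=q)
+#   points = timeseries_db.get_list_query(q)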
+
+
+# TODO / known issues:
+# - old data: delete by query is inefficient, a retention policy should be
+#   implemented with index lifecycle management instead
+# - fix average: currently it's computing the average over all fields!
+# - time interval: fix the range
+# - device query
diff --git a/openwisp_monitoring/db/backends/elasticsearch/index.py b/openwisp_monitoring/db/backends/elasticsearch/index.py
new file mode 100644
index 000000000..cfa866a9f
--- /dev/null
+++ b/openwisp_monitoring/db/backends/elasticsearch/index.py
@@ -0,0 +1,67 @@
+import uuid
+
+from django.conf import settings
+from elasticsearch import Elasticsearch
+from elasticsearch.exceptions import NotFoundError
+from elasticsearch.helpers import bulk
+from elasticsearch_dsl import Date, Document, InnerDoc, Nested, Q, Search
+from swapper import load_model
+
+
+class Point(InnerDoc):
+    time = Date(required=True, default_timezone=settings.TIME_ZONE)
+    fields = Nested(dynamic=True, required=True, multi=True)
+
+
+class MetricIndex(Document):
+    tags = Nested(dynamic=True, required=True, multi=True)
+    # returns an empty list if not present
+    points = Nested(Point)
+
+    class Index:
+        # the name gets replaced with the metric's key
+        name = 'metric'
+        settings = {
+            "number_of_shards": 1,
+            "number_of_replicas": 0,
+        }
+
+
+def find_metric(index, tags, add=False):
+    client = Elasticsearch()
+    search = Search(using=client, index=index)
+    if tags:
+        tags_dict = dict()
+        for key, value in tags.items():
+            tags_dict[f'tags.{key}'] = value
+        q = Q('bool', must=[Q('match', **{k: str(v)}) for k, v in tags_dict.items()])
+    else:
+        q = Q()
+    try:
+        return list(search.query(q).execute())[0].meta['id']
+    except (NotFoundError, AttributeError, IndexError):
+        return add_index(index, tags)['_id'] if add else None
+
+
+def add_index(key, tags, id=None):
+    """
+    Adds an index to elasticsearch using the ``key``, ``tags`` and ``id`` provided.
+    If no ``id`` is provided, a random ``uuid`` is used.
+    """
+    obj = MetricIndex(meta={'id': id or uuid.uuid1()}, tags=tags)
+    obj.meta.index = key
+    obj.save()
+    return obj.to_dict(include_meta=True)
+
+
+def bulk_indexing():
+    """ indexes all existing metrics """
+    Metric = load_model('monitoring', 'Metric')
+    MetricIndex.init()
+    es = Elasticsearch()
+    bulk(
+        client=es,
+        actions=(
+            add_index(m.key, m.tags, m.id) for m in Metric.objects.all().iterator()
+        ),
+    )
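+
+
+# usage sketch for backfilling existing metrics
+# (e.g. from ``python manage.py shell``):
+#   from openwisp_monitoring.db.backends.elasticsearch.index import bulk_indexing
+#   bulk_indexing()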
diff --git a/openwisp_monitoring/db/backends/elasticsearch/queries.py b/openwisp_monitoring/db/backends/elasticsearch/queries.py
new file mode 100644
index 000000000..ec1c81689
--- /dev/null
+++ b/openwisp_monitoring/db/backends/elasticsearch/queries.py
@@ -0,0 +1,81 @@
+import operator
+from copy import deepcopy
+
+default_chart_query = {
+    'query': {
+        'bool': {
+            'must': [
+                {'match': {'tags.object_id': {'query': '{object_id}'}}},
+                {'match': {'tags.content_type': {'query': '{content_type}'}}},
+                {'range': {'points.time': {'from': 'now-1d/d', 'to': 'now/d'}}},
+            ]
+        }
+    },
+    '_source': False,
+    'size': 0,
+    'aggs': {
+        'GroupByTime': {
+            'date_histogram': {
+                'field': 'points.time',
+                'fixed_interval': '10m',
+                'format': 'date_time_no_millis',
+                'order': {'_key': 'desc'},
+            },
+            'aggs': {'{field_name}': {'avg': {'field': 'points.fields.{field_name}'}}},
+        }
+    },
+}
+
+math_map = {
+    'uptime': {'operator': '*', 'value': 100},
+    'memory_usage': {'operator': '*', 'value': 100},
+    'CPU_load': {'operator': '*', 'value': 100},
+    'disk_usage': {'operator': '*', 'value': 100},
+    'upload': {'operator': '/', 'value': 1000000000},
+    'download': {'operator': '/', 'value': 1000000000},
+}
+
+operator_lookup = {
+    '+': operator.add,
+    '-': operator.sub,
+    '*': operator.mul,
+    '/': operator.truediv,
+}
+
+
+def _make_query(aggregation=None):
+    query = deepcopy(default_chart_query)
+    if aggregation:
+        query['aggs']['GroupByTime']['aggs'] = aggregation
+    return query
+
+
+def _get_chart_query():
+    aggregation_dict = {
+        'uptime': {'uptime': {'avg': {'field': 'points.fields.reachable'}}},
+        'packet_loss': {'packet_loss': {'avg': {'field': 'points.fields.loss'}}},
+        'rtt': {
+            'RTT_average': {'avg': {'field': 'points.fields.rtt_avg'}},
+            'RTT_max': {'avg': {'field': 'points.fields.rtt_max'}},
+            'RTT_min': {'avg': {'field': 'points.fields.rtt_min'}},
+        },
+        'traffic': {
+            'upload': {'sum': {'field': 'points.fields.tx_bytes'}},
+            'download': {'sum': {'field': 'points.fields.rx_bytes'}},
+        },
+        'wifi_clients': {
+            'wifi_clients': {'cardinality': {'field': 'points.fields.{field_name}'}}
+        },
+        'memory': {'memory_usage': {'avg': {'field': 'points.fields.percent_used'}}},
+        'cpu': {'CPU_load': {'avg': {'field': 'points.fields.cpu_usage'}}},
+        'disk': {'disk_usage': {'avg': {'field': 'points.fields.used_disk'}}},
+    }
+    query = {}
+    for k, v in aggregation_dict.items():
+        query[k] = {'elasticsearch': _make_query(v)}
+    return query
+
+
+chart_query = _get_chart_query()
+
+device_data_query = None
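+
+# Note: '{object_id}', '{content_type}' and '{field_name}' are placeholders;
+# ``DatabaseClient.get_query()`` dumps the query to JSON and substitutes them
+# with the chart's parameters at runtime, e.g. '{field_name}' -> 'reachable'
+# (illustrative value).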
diff --git a/openwisp_monitoring/db/backends/influxdb/client.py b/openwisp_monitoring/db/backends/influxdb/client.py
index c93bb63de..694e4cce5 100644
--- a/openwisp_monitoring/db/backends/influxdb/client.py
+++ b/openwisp_monitoring/db/backends/influxdb/client.py
@@ -50,7 +50,6 @@ class DatabaseClient(object):
     backend_name = 'influxdb'

     def __init__(self, db_name=None):
-        self._db = None
         self.db_name = db_name or TIMESERIES_DB['NAME']
         self.client_error = DatabaseException.client_error

@@ -295,3 +294,9 @@ def _get_top_fields(
         keys.reverse()
         top = keys[0:number]
         return [item.replace('sum_', '') for item in top]
+
+    def default_chart_query(self, tags):
+        q = "SELECT {field_name} FROM {key} WHERE time >= '{time}'"
+        if tags:
+            q += " AND content_type = '{content_type}' AND object_id = '{object_id}'"
+        return q
diff --git a/openwisp_monitoring/db/backends/influxdb/queries.py b/openwisp_monitoring/db/backends/influxdb/queries.py
index 61c835d39..2fd426fd9 100644
--- a/openwisp_monitoring/db/backends/influxdb/queries.py
+++ b/openwisp_monitoring/db/backends/influxdb/queries.py
@@ -59,11 +59,6 @@
     },
 }

-default_chart_query = [
-    "SELECT {field_name} FROM {key} WHERE time >= '{time}'",
-    " AND content_type = '{content_type}' AND object_id = '{object_id}'",
-]
-
 device_data_query = (
     "SELECT data FROM {0}.{1} WHERE pk = '{2}' " "ORDER BY time DESC LIMIT 1"
 )
diff --git a/openwisp_monitoring/device/base/models.py b/openwisp_monitoring/device/base/models.py
index 0b7cc7387..d15986406 100644
--- a/openwisp_monitoring/device/base/models.py
+++ b/openwisp_monitoring/device/base/models.py
@@ -96,8 +96,18 @@ def data(self):
         """
         if self.__data:
             return self.__data
-        q = device_data_query.format(SHORT_RP, self.__key, self.pk)
-        points = timeseries_db.get_list_query(q, precision=None)
+        if timeseries_db.backend_name != 'elasticsearch':
+            q = device_data_query.format(SHORT_RP, self.__key, self.pk)
+            points = timeseries_db.get_list_query(q, precision=None)
+        else:
+            # raw queries are skipped with Elasticsearch due to performance
+            # and inverted index issues, ``read()`` is used instead
+            points = timeseries_db.read(
+                key=self.__key,
+                fields='data',
+                tags={'pk': self.pk},
+                time_format='isoformat',
+            )
         if not points:
             return None
         self.data_timestamp = points[0]['time']
diff --git a/openwisp_monitoring/monitoring/base/models.py b/openwisp_monitoring/monitoring/base/models.py
index b9258acef..73c39a2f7 100644
--- a/openwisp_monitoring/monitoring/base/models.py
+++ b/openwisp_monitoring/monitoring/base/models.py
@@ -19,7 +19,7 @@

 from openwisp_utils.base import TimeStampedEditableModel

-from ...db import default_chart_query, timeseries_db
+from ...db import timeseries_db
 from ..charts import (
     CHART_CONFIGURATION_CHOICES,
     DEFAULT_COLORS,
@@ -319,10 +319,8 @@ def top_fields(self):

     @property
     def _default_query(self):
-        q = default_chart_query[0]
-        if self.metric.object_id:
-            q += default_chart_query[1]
-        return q
+        tags = bool(self.metric.object_id)
+        return timeseries_db.default_chart_query(tags)

     def get_query(
         self,
diff --git a/openwisp_monitoring/monitoring/tests/__init__.py b/openwisp_monitoring/monitoring/tests/__init__.py
index 40b0dce75..1a96b9f1a 100644
--- a/openwisp_monitoring/monitoring/tests/__init__.py
+++ b/openwisp_monitoring/monitoring/tests/__init__.py
@@ -7,6 +7,7 @@

 from ...db import timeseries_db
 from ...db.backends import TIMESERIES_DB
+from ...db.backends.elasticsearch.queries import _make_query
 from .. import register_chart, unregister_chart

 start_time = now()
@@ -28,7 +29,10 @@
             "SELECT {fields|SUM|/ 1} FROM {key} "
             "WHERE time >= '{time}' AND content_type = "
             "'{content_type}' AND object_id = '{object_id}'"
-        )
+        ),
+        'elasticsearch': _make_query(
+            {'{field_name}': {'sum': {'field': 'points.fields.{field_name}'}}}
+        ),
     },
 },
 'dummy': {
@@ -45,7 +49,7 @@
     'description': 'Bugged chart for testing purposes.',
     'unit': 'bugs',
     'order': 999,
-    'query': {'influxdb': "BAD"},
+    'query': {'influxdb': "BAD", 'elasticsearch': "BAD"},
 },
 'default': {
     'type': 'line',
@@ -57,7 +61,8 @@
         'influxdb': (
             "SELECT {field_name} FROM {key} WHERE time >= '{time}' AND "
             "content_type = '{content_type}' AND object_id = '{object_id}'"
-        )
+        ),
+        'elasticsearch': _make_query(),
     },
 },
 'multiple_test': {
@@ -70,7 +75,13 @@
         'influxdb': (
             "SELECT {field_name}, value2 FROM {key} WHERE time >= '{time}' AND "
             "content_type = '{content_type}' AND object_id = '{object_id}'"
-        )
+        ),
+        'elasticsearch': _make_query(
+            {
+                '{field_name}': {'sum': {'field': 'points.fields.{field_name}'}},
+                'value2': {'sum': {'field': 'points.fields.value2'}},
+            }
+        ),
     },
 },
 'mean_test': {
@@ -83,7 +94,8 @@
         'influxdb': (
             "SELECT MEAN({field_name}) AS {field_name} FROM {key} WHERE time >= '{time}' AND "
             "content_type = '{content_type}' AND object_id = '{object_id}'"
-        )
+        ),
+        'elasticsearch': _make_query(),
     },
 },
 'sum_test': {
@@ -96,7 +108,10 @@
         'influxdb': (
             "SELECT SUM({field_name}) AS {field_name} FROM {key} WHERE time >= '{time}' AND "
             "content_type = '{content_type}' AND object_id = '{object_id}'"
-        )
+        ),
+        'elasticsearch': _make_query(
+            {'{field_name}': {'sum': {'field': 'points.fields.{field_name}'}}}
+        ),
     },
 },
 'top_fields_mean': {
diff --git a/requirements.txt b/requirements.txt
index f72424e9f..95045a41c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,3 +6,4 @@ djangorestframework>=3.11,<3.12
 mac-vendor-lookup~=0.1
 swapper~=1.1
 django-cache-memoize~=0.1
+elasticsearch-dsl>=7.0.0,<8.0.0
diff --git a/tests/openwisp2/settings.py b/tests/openwisp2/settings.py
index 715eb7e35..3310cea97 100644
--- a/tests/openwisp2/settings.py
+++ b/tests/openwisp2/settings.py
@@ -16,13 +16,14 @@
     }
 }

+# TODO: This needs to be configurable
 TIMESERIES_DATABASE = {
-    'BACKEND': 'openwisp_monitoring.db.backends.influxdb',
+    'BACKEND': 'openwisp_monitoring.db.backends.elasticsearch',
     'USER': 'openwisp',
     'PASSWORD': 'openwisp',
    'NAME': 'openwisp2',
-    'HOST': os.getenv('INFLUXDB_HOST', 'localhost'),
-    'PORT': '8086',
+    'HOST': 'localhost',
+    'PORT': '9200',
 }

 SECRET_KEY = 'fn)t*+$)ugeyip6-#txyy$5wf2ervc0d2n#h)qb)y5@ly$t*@w'
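+
+# to switch back to InfluxDB, the removed configuration can be restored
+# (illustrative, mirrors the lines removed above):
+#   'BACKEND': 'openwisp_monitoring.db.backends.influxdb',
+#   'HOST': os.getenv('INFLUXDB_HOST', 'localhost'),
+#   'PORT': '8086',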