From 738b1eddcda5adb93f32514c6012ff9ef4eb8e57 Mon Sep 17 00:00:00 2001 From: Federico Gustavo Galland <99492720+f-galland@users.noreply.github.com> Date: Mon, 8 Jul 2024 08:03:25 -0300 Subject: [PATCH 1/2] Rename docker compose files (#296) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Rename integrations compose files * Change references in README.md files * Fix relative paths --------- Co-authored-by: Álex Ruiz --- integrations/amazon-security-lake/CONTRIBUTING.md | 10 +++++----- ...urity-lake.yml => compose.amazon-security-lake.yml} | 0 .../{elastic.yml => compose.indexer-elastic.yml} | 0 .../{opensearch.yml => compose.indexer-opensearch.yml} | 0 .../docker/{splunk.yml => compose.indexer-splunk.yml} | 0 ...manager-elastic.yml => compose.manager-elastic.yml} | 0 ...r-opensearch.yml => compose.manager-opensearch.yml} | 0 .../{manager-splunk.yml => compose.manager-splunk.yml} | 0 integrations/elastic/README.md | 8 ++++---- integrations/opensearch/README.md | 8 ++++---- integrations/splunk/README.md | 8 ++++---- 11 files changed, 17 insertions(+), 17 deletions(-) rename integrations/docker/{amazon-security-lake.yml => compose.amazon-security-lake.yml} (100%) rename integrations/docker/{elastic.yml => compose.indexer-elastic.yml} (100%) rename integrations/docker/{opensearch.yml => compose.indexer-opensearch.yml} (100%) rename integrations/docker/{splunk.yml => compose.indexer-splunk.yml} (100%) rename integrations/docker/{manager-elastic.yml => compose.manager-elastic.yml} (100%) rename integrations/docker/{manager-opensearch.yml => compose.manager-opensearch.yml} (100%) rename integrations/docker/{manager-splunk.yml => compose.manager-splunk.yml} (100%) diff --git a/integrations/amazon-security-lake/CONTRIBUTING.md b/integrations/amazon-security-lake/CONTRIBUTING.md index 7675aa03c7961..6dc91e1ece047 100644 --- a/integrations/amazon-security-lake/CONTRIBUTING.md +++ b/integrations/amazon-security-lake/CONTRIBUTING.md @@ -5,7 +5,7 @@ A demo of the integration can be started using the content of this folder and Docker. Open a terminal in the `wazuh-indexer/integrations` folder and start the environment. ```console -docker compose -f ./docker/amazon-security-lake.yml up -d +docker compose -f ./docker/compose.amazon-security-lake.yml up -d ``` This Docker Compose project will bring up these services: @@ -13,10 +13,10 @@ This Docker Compose project will bring up these services: - a _wazuh-indexer_ node - a _wazuh-dashboard_ node - a _logstash_ node -- our [events generator](./tools/events-generator/README.md) +- our [events generator](../tools/events-generator/README.md) - an AWS Lambda Python container. -On the one hand, the event generator will push events constantly to the indexer, to the `wazuh-alerts-4.x-sample` index by default (refer to the [events generator](./tools/events-generator/README.md) documentation for customization options). On the other hand, Logstash will query for new data and deliver it to output configured in the pipeline, which can be one of `indexer-to-s3` or `indexer-to-file`. +On the one hand, the event generator will push events constantly to the indexer, to the `wazuh-alerts-4.x-sample` index by default (refer to the [events generator](../tools/events-generator/README.md) documentation for customization options). On the other hand, Logstash will query for new data and deliver it to output configured in the pipeline, which can be one of `indexer-to-s3` or `indexer-to-file`. 
The `indexer-to-s3` pipeline is the method used by the integration. This pipeline delivers the data to an S3 bucket, from which the data is processed using a Lambda function, to finally be sent to the Amazon Security Lake bucket in Parquet format. @@ -33,13 +33,13 @@ After 5 minutes, the first batch of data will show up in http://localhost:9444/u bash amazon-security-lake/src/invoke-lambda.sh ``` -Processed data will be uploaded to http://localhost:9444/ui/wazuh-aws-security-lake-parquet. Click on any file to download it, and check it's content using `parquet-tools`. Just make sure of installing the virtual environment first, through [requirements.txt](./amazon-security-lake/). +Processed data will be uploaded to http://localhost:9444/ui/wazuh-aws-security-lake-parquet. Click on any file to download it, and check it's content using `parquet-tools`. Just make sure of installing the virtual environment first, through [requirements.txt](./requirements.txt). ```bash parquet-tools show ``` -If the `S3_BUCKET_OCSF` variable is set in the container running the AWS Lambda function, intermediate data in OCSF and JSON format will be written to a dedicated bucket. This is enabled by default, writing to the `wazuh-aws-security-lake-ocsf` bucket. Bucket names and additional environment variables can be configured editing the [amazon-security-lake.yml](./docker/amazon-security-lake.yml) file. +If the `S3_BUCKET_OCSF` variable is set in the container running the AWS Lambda function, intermediate data in OCSF and JSON format will be written to a dedicated bucket. This is enabled by default, writing to the `wazuh-aws-security-lake-ocsf` bucket. Bucket names and additional environment variables can be configured editing the [compose.amazon-security-lake.yml](../docker/compose.amazon-security-lake.yml) file. For development or debugging purposes, you may want to enable hot-reload, test or debug on these files, by using the `--config.reload.automatic`, `--config.test_and_exit` or `--debug` flags, respectively. 
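A minimal sketch of the virtual environment step mentioned above, assuming that `requirements.txt` in the `amazon-security-lake/` folder provides `parquet-tools`, and using an illustrative name for the downloaded Parquet file:

```console
# Run from the wazuh-indexer/integrations folder (assumed working directory).
python3 -m venv amazon-security-lake/.venv
source amazon-security-lake/.venv/bin/activate
pip install -r amazon-security-lake/requirements.txt
# The file name is illustrative; use the object downloaded from the S3 web UI.
parquet-tools show wazuh-aws-security-lake-sample.parquet
```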
diff --git a/integrations/docker/amazon-security-lake.yml b/integrations/docker/compose.amazon-security-lake.yml similarity index 100% rename from integrations/docker/amazon-security-lake.yml rename to integrations/docker/compose.amazon-security-lake.yml diff --git a/integrations/docker/elastic.yml b/integrations/docker/compose.indexer-elastic.yml similarity index 100% rename from integrations/docker/elastic.yml rename to integrations/docker/compose.indexer-elastic.yml diff --git a/integrations/docker/opensearch.yml b/integrations/docker/compose.indexer-opensearch.yml similarity index 100% rename from integrations/docker/opensearch.yml rename to integrations/docker/compose.indexer-opensearch.yml diff --git a/integrations/docker/splunk.yml b/integrations/docker/compose.indexer-splunk.yml similarity index 100% rename from integrations/docker/splunk.yml rename to integrations/docker/compose.indexer-splunk.yml diff --git a/integrations/docker/manager-elastic.yml b/integrations/docker/compose.manager-elastic.yml similarity index 100% rename from integrations/docker/manager-elastic.yml rename to integrations/docker/compose.manager-elastic.yml diff --git a/integrations/docker/manager-opensearch.yml b/integrations/docker/compose.manager-opensearch.yml similarity index 100% rename from integrations/docker/manager-opensearch.yml rename to integrations/docker/compose.manager-opensearch.yml diff --git a/integrations/docker/manager-splunk.yml b/integrations/docker/compose.manager-splunk.yml similarity index 100% rename from integrations/docker/manager-splunk.yml rename to integrations/docker/compose.manager-splunk.yml diff --git a/integrations/elastic/README.md b/integrations/elastic/README.md index 27b5cdd7cd209..30d34b88b3a9f 100644 --- a/integrations/elastic/README.md +++ b/integrations/elastic/README.md @@ -11,11 +11,11 @@ This document describes how to prepare a Docker Compose environment to test the 1. Clone the Wazuh repository and navigate to the `integrations/` folder. 2. Run the following command to start the environment: ```bash - docker compose -f ./docker/elastic.yml up -d + docker compose -f ./docker/compose.indexer-elastic.yml up -d ``` 3. If you prefer, you can start the integration with the Wazuh Manager as data source: ```bash - docker compose -f ./docker/manager-elastic.yml up -d + docker compose -f ./docker/compose.manager-elastic.yml up -d ``` The Docker Compose project will bring up the following services: @@ -29,12 +29,12 @@ The Docker Compose project will bring up the following services: For custom configurations, you may need to modify these files: -- [docker/elastic.yml](../docker/elastic.yml): Docker Compose file. +- [docker/compose.indexer-elastic.yml](../docker/compose.indexer-elastic.yml): Docker Compose file. - [docker/.env](../docker/.env): Environment variables file. - [elastic/logstash/pipeline/indexer-to-elastic.conf](./logstash/pipeline/indexer-to-elastic.conf): Logstash Pipeline configuration file. If you opted to start the integration with the Wazuh Manager, you can modify the following files: -- [docker/manager-elastic.yml](../docker/manager-elastic.yml): Docker Compose file. +- [docker/compose.manager-elastic.yml](../docker/compose.manager-elastic.yml): Docker Compose file. - [elastic/logstash/pipeline/manager-to-elastic.conf](./logstash/pipeline/manager-to-elastic.conf): Logstash Pipeline configuration file. Check the files above for **credentials**, ports, and other configurations. 
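Once the environment is up, a quick sanity check is to list the services and follow the Logstash output. This is a sketch; the `logstash` service name is an assumption, so verify it with `docker compose ps` first:

```console
docker compose -f ./docker/compose.indexer-elastic.yml ps
# "logstash" is an assumed service name; its log should show events read from the
# Wazuh indexer and delivered to the output configured in the pipeline.
docker compose -f ./docker/compose.indexer-elastic.yml logs -f logstash
```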
diff --git a/integrations/opensearch/README.md b/integrations/opensearch/README.md index 7547473579299..68e8c4b5693a6 100644 --- a/integrations/opensearch/README.md +++ b/integrations/opensearch/README.md @@ -11,11 +11,11 @@ This document describes how to prepare a Docker Compose environment to test the 1. Clone the Wazuh repository and navigate to the `integrations/` folder. 2. Run the following command to start the environment: ```bash - docker compose -f ./docker/opensearch.yml up -d + docker compose -f ./docker/compose.indexer-opensearch.yml up -d ``` 3. If you prefer, you can start the integration with the Wazuh Manager as data source: ```bash - docker compose -f ./docker/manager-opensearch.yml up -d + docker compose -f ./docker/compose.manager-opensearch.yml up -d ``` The Docker Compose project will bring up the following services: @@ -29,12 +29,12 @@ The Docker Compose project will bring up the following services: For custom configurations, you may need to modify these files: -- [docker/opensearch.yml](../docker/opensearch.yml): Docker Compose file. +- [docker/compose.indexer-opensearch.yml](../docker/compose.indexer-opensearch.yml): Docker Compose file. - [docker/.env](../docker/.env): Environment variables file. - [opensearch/logstash/pipeline/indexer-to-opensearch.conf](./logstash/pipeline/indexer-to-opensearch.conf): Logstash Pipeline configuration file. If you opted to start the integration with the Wazuh Manager, you can modify the following files: -- [docker/manager-opensearch.yml](../docker/manager-opensearch.yml): Docker Compose file. +- [docker/compose.manager-opensearch.yml](../docker/compose.manager-opensearch.yml): Docker Compose file. - [opensearch/logstash/pipeline/manager-to-opensearch.conf](./logstash/pipeline/manager-to-opensearch.conf): Logstash Pipeline configuration file. Check the files above for **credentials**, ports, and other configurations. diff --git a/integrations/splunk/README.md b/integrations/splunk/README.md index 5e1bdbdccfb5b..48fa738fc4aef 100644 --- a/integrations/splunk/README.md +++ b/integrations/splunk/README.md @@ -11,11 +11,11 @@ This document describes how to prepare a Docker Compose environment to test the 1. Clone the Wazuh repository and navigate to the `integrations/` folder. 2. Run the following command to start the environment: ```bash - docker compose -f ./docker/splunk.yml up -d + docker compose -f ./docker/compose.indexer-splunk.yml up -d ``` 3. If you prefer, you can start the integration with the Wazuh Manager as data source: ```bash - docker compose -f ./docker/manager-splunk.yml up -d + docker compose -f ./docker/compose.manager-splunk.yml up -d ``` The Docker Compose project will bring up the following services: @@ -28,12 +28,12 @@ The Docker Compose project will bring up the following services: For custom configurations, you may need to modify these files: -- [docker/splunk.yml](../docker/splunk.yml): Docker Compose file. +- [docker/compose.indexer-splunk.yml](../docker/compose.indexer-splunk.yml): Docker Compose file. - [docker/.env](../docker/.env): Environment variables file. - [splunk/logstash/pipeline/indexer-to-splunk.conf](./logstash/pipeline/indexer-to-splunk.conf): Logstash Pipeline configuration file. If you opted to start the integration with the Wazuh Manager, you can modify the following files: -- [docker/manager-splunk.yml](../docker/manager-splunk.yml): Docker Compose file. +- [docker/compose.manager-splunk.yml](../docker/compose.manager-splunk.yml): Docker Compose file. 
- [splunk/logstash/pipeline/manager-to-splunk.conf](./logstash/pipeline/manager-to-splunk.conf): Logstash Pipeline configuration file. Check the files above for **credentials**, ports, and other configurations. From e43828fd8c470d2b4b8b682fa37ad50792641640 Mon Sep 17 00:00:00 2001 From: Federico Gustavo Galland <99492720+f-galland@users.noreply.github.com> Date: Mon, 8 Jul 2024 08:16:22 -0300 Subject: [PATCH 2/2] Add TLS certificates to web UIs across integrations' docker environments (#297) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add certs generator and dashboard certs configuration * Adding tls to kibana and wazuh dashboard in elastic integration * Adding tls to Kibana in manager to elastic integration * Add TLS to OpenSearch and Wazuh Dashboards * Add TLS to Dashboard in AWS Security Lake integration --------- Co-authored-by: Álex Ruiz --- .../docker/compose.amazon-security-lake.yml | 32 ++++++++++++++ .../docker/compose.indexer-elastic.yml | 43 ++++++++++++++++++- .../docker/compose.indexer-opensearch.yml | 26 +++++++++++ .../docker/compose.indexer-splunk.yml | 32 ++++++++++++++ .../docker/compose.manager-elastic.yml | 11 ++++- 5 files changed, 142 insertions(+), 2 deletions(-) diff --git a/integrations/docker/compose.amazon-security-lake.yml b/integrations/docker/compose.amazon-security-lake.yml index c02b51dab8c58..16ee907668a0d 100644 --- a/integrations/docker/compose.amazon-security-lake.yml +++ b/integrations/docker/compose.amazon-security-lake.yml @@ -65,8 +65,17 @@ services: - 5601:5601 # Map host port 5601 to container port 5601 expose: - "5601" # Expose port 5601 for web access to OpenSearch Dashboards + volumes: + - ./certs/:/usr/share/opensearch-dashboards/config/certs/ + - ./certs/wazuh.dashboard-key.pem:/usr/share/opensearch-dashboards/config/certs/opensearch.key + - ./certs/wazuh.dashboard.pem:/usr/share/opensearch-dashboards/config/certs/opensearch.pem + - ./certs/root-ca.pem:/usr/share/opensearch-dashboards/config/certs/root-ca.pem environment: OPENSEARCH_HOSTS: '["https://wazuh.indexer:9200"]' # Define the OpenSearch nodes that OpenSearch Dashboards will query + SERVER_SSL_ENABLED: 'true' + SERVER_SSL_KEY: '/usr/share/opensearch-dashboards/config/certs/opensearch.key' + SERVER_SSL_CERTIFICATE: '/usr/share/opensearch-dashboards/config/certs/opensearch.pem' + OPENSEARCH_SSL_CERTIFICATEAUTHORITIES: '/usr/share/opensearch-dashboards/config/certs/root-ca.pem' wazuh.integration.security.lake: image: wazuh/indexer-security-lake-integration @@ -128,10 +137,33 @@ services: - ../amazon-security-lake/src:/var/task ports: - "9000:8080" + + generate-certs-config: + image: alpine:latest + volumes: + - ./config:/config + command: | + sh -c " + echo ' + nodes: + indexer: + - name: wazuh.indexer + ip: \"wazuh.indexer\" + server: + - name: wazuh.manager + ip: \"wazuh.manager\" + dashboard: + - name: wazuh.dashboard + ip: \"wazuh.dashboard\" + ' > /config/certs.yml + " wazuh-certs-generator: image: wazuh/wazuh-certs-generator:0.0.1 hostname: wazuh-certs-generator + depends_on: + generate-certs-config: + condition: service_completed_successfully container_name: wazuh-certs-generator entrypoint: sh -c "/entrypoint.sh; chown -R 1000:999 /certificates; chmod 740 /certificates; chmod 440 /certificates/*" volumes: diff --git a/integrations/docker/compose.indexer-elastic.yml b/integrations/docker/compose.indexer-elastic.yml index e30cda48306ab..937d712deee35 100644 --- a/integrations/docker/compose.indexer-elastic.yml +++ 
b/integrations/docker/compose.indexer-elastic.yml @@ -59,12 +59,44 @@ services: - 5601:5601 # Map host port 5601 to container port 5601 expose: - "5601" # Expose port 5601 for web access to OpenSearch Dashboards + volumes: + - ./certs/:/usr/share/opensearch-dashboards/config/certs/ + - ./certs/wazuh.dashboard-key.pem:/usr/share/opensearch-dashboards/config/certs/opensearch.key + - ./certs/wazuh.dashboard.pem:/usr/share/opensearch-dashboards/config/certs/opensearch.pem + - ./certs/root-ca.pem:/usr/share/opensearch-dashboards/config/certs/root-ca.pem environment: OPENSEARCH_HOSTS: '["https://wazuh.indexer:9200"]' # Define the OpenSearch nodes that OpenSearch Dashboards will query + SERVER_SSL_ENABLED: 'true' + SERVER_SSL_KEY: '/usr/share/opensearch-dashboards/config/certs/opensearch.key' + SERVER_SSL_CERTIFICATE: '/usr/share/opensearch-dashboards/config/certs/opensearch.pem' + OPENSEARCH_SSL_CERTIFICATEAUTHORITIES: '/usr/share/opensearch-dashboards/config/certs/root-ca.pem' + + generate-certs-config: + image: alpine:latest + volumes: + - ./config:/config + command: | + sh -c " + echo ' + nodes: + indexer: + - name: wazuh.indexer + ip: \"wazuh.indexer\" + server: + - name: wazuh.manager + ip: \"wazuh.manager\" + dashboard: + - name: wazuh.dashboard + ip: \"wazuh.dashboard\" + ' > /config/certs.yml + " wazuh-certs-generator: image: wazuh/wazuh-certs-generator:0.0.1 hostname: wazuh-certs-generator + depends_on: + generate-certs-config: + condition: service_completed_successfully entrypoint: sh -c "/entrypoint.sh; chown -R 1000:999 /certificates; chmod 740 /certificates; chmod 440 /certificates/*" volumes: - ./certs/:/certificates/ @@ -105,6 +137,12 @@ services: " - localhost\n"\ " ip:\n"\ " - 127.0.0.1\n"\ + " - name: kibana\n"\ + " dns:\n"\ + " - kibana\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ > config/certs/instances.yml; bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; unzip config/certs/certs.zip -d config/certs; @@ -181,12 +219,15 @@ services: - ELASTICSEARCH_USERNAME=kibana_system - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + - SERVER_SSL_ENABLED=true + - SERVER_SSL_KEY=/usr/share/kibana/config/certs/kibana/kibana.key + - SERVER_SSL_CERTIFICATE=/usr/share/kibana/config/certs/kibana/kibana.crt mem_limit: ${MEM_LIMIT} healthcheck: test: [ 'CMD-SHELL', - "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", + "curl -s -I https://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", ] interval: 10s timeout: 10s diff --git a/integrations/docker/compose.indexer-opensearch.yml b/integrations/docker/compose.indexer-opensearch.yml index 2cfa537cbcc50..ed878c8054b92 100644 --- a/integrations/docker/compose.indexer-opensearch.yml +++ b/integrations/docker/compose.indexer-opensearch.yml @@ -72,10 +72,36 @@ services: SERVER.SSL_CERTIFICATE: '/usr/share/opensearch-dashboards/config/certs/opensearch.pem' OPENSEARCH_SSL_CERTIFICATEAUTHORITIES: '/usr/share/opensearch-dashboards/config/certs/root-ca.pem' + generate-certs-config: + image: alpine:latest + volumes: + - ./config:/config + command: | + sh -c " + echo ' + nodes: + indexer: + - name: wazuh.indexer + ip: \"wazuh.indexer\" + - name: opensearch.node + ip: \"opensearch.node\" + server: + - name: wazuh.manager + ip: \"wazuh.manager\" + dashboard: + - name: wazuh.dashboard + ip: \"wazuh.dashboard\" + - name: opensearch.dashboards + ip: 
\"opensearch.dashboards\" + ' > /config/certs.yml + " wazuh-certs-generator: image: wazuh/wazuh-certs-generator:0.0.1 hostname: wazuh-certs-generator + depends_on: + generate-certs-config: + condition: service_completed_successfully entrypoint: sh -c "/entrypoint.sh; chown -R 1000:999 /certificates; chmod 740 /certificates; chmod 440 /certificates/*" volumes: - ./certs/:/certificates/ diff --git a/integrations/docker/compose.indexer-splunk.yml b/integrations/docker/compose.indexer-splunk.yml index 91d4f346cca81..1336575bb0e45 100644 --- a/integrations/docker/compose.indexer-splunk.yml +++ b/integrations/docker/compose.indexer-splunk.yml @@ -59,12 +59,44 @@ services: - 5601:5601 # Map host port 5601 to container port 5601 expose: - "5601" # Expose port 5601 for web access to OpenSearch Dashboards + volumes: + - ./certs/:/usr/share/opensearch-dashboards/config/certs/ + - ./certs/wazuh.dashboard-key.pem:/usr/share/opensearch-dashboards/config/certs/opensearch.key + - ./certs/wazuh.dashboard.pem:/usr/share/opensearch-dashboards/config/certs/opensearch.pem + - ./certs/root-ca.pem:/usr/share/opensearch-dashboards/config/certs/root-ca.pem environment: OPENSEARCH_HOSTS: '["https://wazuh.indexer:9200"]' # Define the OpenSearch nodes that OpenSearch Dashboards will query + SERVER_SSL_ENABLED: 'true' + SERVER_SSL_KEY: '/usr/share/opensearch-dashboards/config/certs/opensearch.key' + SERVER_SSL_CERTIFICATE: '/usr/share/opensearch-dashboards/config/certs/opensearch.pem' + OPENSEARCH_SSL_CERTIFICATEAUTHORITIES: '/usr/share/opensearch-dashboards/config/certs/root-ca.pem' + + generate-certs-config: + image: alpine:latest + volumes: + - ./config:/config + command: | + sh -c " + echo ' + nodes: + indexer: + - name: wazuh.indexer + ip: \"wazuh.indexer\" + server: + - name: wazuh.manager + ip: \"wazuh.manager\" + dashboard: + - name: wazuh.dashboard + ip: \"wazuh.dashboard\" + ' > /config/certs.yml + " wazuh-certs-generator: image: wazuh/wazuh-certs-generator:0.0.1 hostname: wazuh-certs-generator + depends_on: + generate-certs-config: + condition: service_completed_successfully entrypoint: sh -c "/entrypoint.sh; chown -R 1000:999 /certificates; chmod 740 /certificates; chmod 440 /certificates/*" volumes: - ./certs/:/certificates/ diff --git a/integrations/docker/compose.manager-elastic.yml b/integrations/docker/compose.manager-elastic.yml index e98a2871dff8b..12ee41527ee24 100644 --- a/integrations/docker/compose.manager-elastic.yml +++ b/integrations/docker/compose.manager-elastic.yml @@ -150,6 +150,12 @@ services: " - localhost\n"\ " ip:\n"\ " - 127.0.0.1\n"\ + " - name: kibana\n"\ + " dns:\n"\ + " - kibana\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ > config/certs/instances.yml; bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; unzip config/certs/certs.zip -d config/certs; @@ -226,12 +232,15 @@ services: - ELASTICSEARCH_USERNAME=kibana_system - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + - SERVER_SSL_ENABLED=true + - SERVER_SSL_KEY=/usr/share/kibana/config/certs/kibana/kibana.key + - SERVER_SSL_CERTIFICATE=/usr/share/kibana/config/certs/kibana/kibana.crt mem_limit: ${MEM_LIMIT} healthcheck: test: [ 'CMD-SHELL', - "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", + "curl -s -I https://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", ] interval: 10s timeout: 10s