From 44338dcad5d9f6bc6ca0db0072cb2f87b6ede299 Mon Sep 17 00:00:00 2001 From: jack-berg <34418638+jack-berg@users.noreply.github.com> Date: Fri, 21 Jun 2024 16:27:22 -0500 Subject: [PATCH] Migrate confluent cloud collector example to kubernetes (#626) * Add confluent cloud kubernetes example * Replace existing confluent example * Confluent Cloud Kafka --- README.md | 8 +- other-examples/collector/confluentcloud/.env | 22 ---- .../collector/confluentcloud/README.md | 91 ++++++++++---- .../collector/confluentcloud/collector.yaml | 37 ------ .../confluentcloud/docker-compose.yaml | 17 --- .../confluentcloud/k8s/collector.yaml | 112 ++++++++++++++++++ .../collector/confluentcloud/k8s/secrets.yaml | 13 ++ 7 files changed, 197 insertions(+), 103 deletions(-) delete mode 100644 other-examples/collector/confluentcloud/.env delete mode 100644 other-examples/collector/confluentcloud/collector.yaml delete mode 100644 other-examples/collector/confluentcloud/docker-compose.yaml create mode 100644 other-examples/collector/confluentcloud/k8s/collector.yaml create mode 100644 other-examples/collector/confluentcloud/k8s/secrets.yaml diff --git a/README.md b/README.md index c03172a0..6442d9cd 100644 --- a/README.md +++ b/README.md @@ -19,15 +19,15 @@ The [Getting Started Guides](./getting-started-guides/README.md) demonstrate how OpenTelemetry is a big ecosystem and everything doesn't fit into the goals of the [getting started guides](#getting-started-guides). These "other examples" demonstrate how other areas of OpenTelemetry fit in with New Relic. 
-* Collector +* [Collector for data processing](./other-examples/collector/nr-config) +* Collector for infrastructure monitoring + * [Confluent cloud kafka monitoring](./other-examples/collector/confluentcloud) * [Docker monitoring](./other-examples/collector/docker) - * [Telemetry data processing](./other-examples/collector/nr-config) - * [Host monitoring](./other-examples/collector/host-monitoring) - * [Confluent cloud monitoring](./other-examples/collector/confluentcloud) * [HCP Consul monitoring](./other-examples/collector/hcp-consul) + * [Host monitoring](./other-examples/collector/host-monitoring) * [Prometheus monitoring](./other-examples/collector/prometheus) * [Redis monitoring](./other-examples/collector/redis) * [Singlestore monitoring](./other-examples/collector/singlestore) * [StatsD monitoring](./other-examples/collector/statsd) * Java * [OpenTelemetry Agent New Relic Config](./other-examples/java/agent-nr-config) diff --git a/other-examples/collector/confluentcloud/.env b/other-examples/collector/confluentcloud/.env deleted file mode 100644 index ac0779ef..00000000 --- a/other-examples/collector/confluentcloud/.env +++ /dev/null @@ -1,22 +0,0 @@ -# New Relic API key to authenticate the call. -# docs: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#license-key -NEW_RELIC_API_KEY= - -# The default US endpoint is set here. You can change the endpoint and port based on your requirements if needed. -# docs: https://docs.newrelic.com/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/get-started/opentelemetry-set-up-your-app/#review-settings -NEW_RELIC_OTLP_ENDPOINT=https://otlp.nr-data.net/ - - -# Set your authentication keys for the Confluent Cloud metrics API. -# docs: https://docs.confluent.io/cloud/current/monitoring/metrics-api.html -CONFLUENT_API_KEY= -CONFLUENT_API_SECRET= - - -# Set your Cluster ID here. 
-# docs: https://docs.confluent.io/confluent-cli/current/command-reference/kafka/cluster/confluent_kafka_cluster_list.html -CLUSTER_ID= - -# OPTIONAL - if you include these be sure to uncomment them in the docker-compose file and in the collector file. -SCHEMA_REGISTRY_ID= -CONNECTOR_ID= \ No newline at end of file diff --git a/other-examples/collector/confluentcloud/README.md b/other-examples/collector/confluentcloud/README.md index 9c35953e..77fdba7e 100644 --- a/other-examples/collector/confluentcloud/README.md +++ b/other-examples/collector/confluentcloud/README.md @@ -1,34 +1,79 @@ -# Confluent Cloud OpenTelemetry metrics example setup +# Monitoring Confluent Cloud Kafka with OpenTelemetry Collector -This example shows a setup for running a prometheus OpenTelemetry Collector in a docker container to scrape metrics from Confluent Cloud and post them the New Relic OTLP Collector Endpoint. +This simple example demonstrates monitoring Confluent Cloud prometheus metrics with the [OpenTelemetry collector](https://opentelemetry.io/docs/collector/), using the [prometheus receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/prometheusreceiver) and sending the data to New Relic via OTLP. -For more information, please see our [Kafka with Confluent documentation](https://docs.newrelic.com/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/collector/collector-configuration-examples/opentelemetry-collector-kafka-confluentcloud/). +## Requirements -## Prerequisites +* You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. Docker desktop [includes a standalone Kubernetes server and client](https://docs.docker.com/desktop/kubernetes/) which is useful for local testing. 
+* [A New Relic account](https://one.newrelic.com/) +* [A New Relic license key](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#license-key) +* [A Confluent Cloud account](https://www.confluent.io/get-started/) with a cluster running +* [A Confluent Cloud API key and secret](https://docs.confluent.io/confluent-cli/current/command-reference/api-key/confluent_api-key_create.html) -1. You must have a Docker daemon running. -2. You must have [Docker compose](https://docs.docker.com/compose/) installed . -3. You must have a [Confluent Cloud account](https://www.confluent.io/get-started/) with a cluster running. ## Running the example -First, set your environment variables in the `.env` file in this directory. For more information on the individual variables, reference the docs available below. -Once the variables are set, run the following command from the root directory to start the collector. +1. Update the `NEW_RELIC_API_KEY`, `CONFLUENT_API_KEY`, and `CONFLUENT_API_SECRET` values in [secrets.yaml](./k8s/secrets.yaml) to your New Relic license key, and confluent API key / secret respectively. See [Confluent docs](https://docs.confluent.io/cloud/current/monitoring/metrics-api.html) for obtaining API key / secret. -```shell -cd ./other-examples/collector/confluentcloud + ```yaml + # ...omitted for brevity + stringData: + # New Relic API key to authenticate the export requests. + # docs: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#license-key + NEW_RELIC_API_KEY: + # Set your authentication keys for the Confluent Cloud metrics API. + # docs: https://docs.confluent.io/cloud/current/monitoring/metrics-api.html + CONFLUENT_API_KEY: + CONFLUENT_API_SECRET: + ``` + + * Note, be careful to avoid inadvertent secret sharing when modifying `secrets.yaml`. To ignore changes to this file from git, run `git update-index --skip-worktree k8s/secrets.yaml`. 
+ + * If your account is based in the EU, update the `NEW_RELIC_OTLP_ENDPOINT` value in [collector.yaml](./k8s/collector.yaml) to: [https://otlp.eu01.nr-data.net](https://otlp.eu01.nr-data.net) + + ```yaml + # ...omitted for brevity + env: + # The default US endpoint is set here. You can change the endpoint and port based on your requirements if needed. + # docs: https://docs.newrelic.com/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/best-practices/opentelemetry-otlp/#configure-endpoint-port-protocol + - name: NEW_RELIC_OTLP_ENDPOINT + value: https://otlp.eu01.nr-data.net + ``` + +2. Set the `CONFLUENT_CLUSTER_ID` env var value in [collector.yaml](./k8s/collector.yaml). See [Confluent docs](https://docs.confluent.io/confluent-cli/current/command-reference/kafka/cluster/confluent_kafka_cluster_list.html#description) for details on obtaining cluster id. + + ```yaml + # ...omitted for brevity + # Set your Confluent Cluster ID here. + # docs: https://docs.confluent.io/confluent-cli/current/command-reference/kafka/cluster/confluent_kafka_cluster_list.html + - name: CONFLUENT_CLUSTER_ID + value: + ``` + + * Optionally, uncomment and set the value for `CONFLUENT_SCHEMA_REGISTRY_ID` and `CONFLUENT_CONNECTOR_ID`. If setting these, you must also uncomment the corresponding references in `.receivers.prometheus.config.scrape_configs[0].params` of the collector-config ConfigMap. + +3. Run the application with the following command. + + ```shell + kubectl apply -f k8s/ + ``` + + * When finished, cleanup resources with the following command. This is also useful to reset if modifying configuration. + + ```shell + kubectl delete -f k8s/ + ``` + +## Viewing your data + +To review your Confluent Cloud Kafka data in New Relic, navigate to "New Relic -> Query Your Data". 
To list the metrics reported, query for: -docker compose up ``` +FROM Metric SELECT uniques(metricName) WHERE otel.library.name = 'otelcol/prometheusreceiver' AND metricName like 'confluent_kafka%' LIMIT MAX +``` + +See [get started with querying](https://docs.newrelic.com/docs/query-your-data/explore-query-data/get-started/introduction-querying-new-relic-data/) for additional details on querying data in New Relic. + +## Additional notes -## Local Variable information - -| Variable | Description | Docs | -| -------- | ----------- | ---- | -| **NEW_RELIC_API_KEY** |New Relic Ingest API Key |[API Key docs](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/) | -| **NEW_RELIC_OTLP_ENDPOINT** |Default US OTLP endpoint is https://otlp.nr-data.net | [OTLP endpoint config docs](https://docs.newrelic.com/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/get-started/opentelemetry-set-up-your-app/#review-settings) | -| **CONFLUENT_API_KEY** |API key for Confluent Cloud, can be created via cli by following the docs |[Confluent API key docs](https://docs.confluent.io/cloud/current/monitoring/metrics-api.html)| -| **CONFLUENT_API_SECRET** | API secret for Confluent Cloud | [Confluent API key docs](https://docs.confluent.io/cloud/current/monitoring/metrics-api.html) | -| **CLUSTER_ID** | ID of the cluster from Confluent Cloud | [List cluster ID docs](https://docs.confluent.io/confluent-cli/current/command-reference/kafka/cluster/confluent_kafka_cluster_list.html#description) | -| **CONNECTOR_ID** |(Optional) ID of the connector from Confluent Cloud | [List connector ID docs](https://docs.confluent.io/confluent-cli/current/command-reference/connect/cluster/confluent_connect_cluster_list.html) | -| **SCHEMA_REGISTRY_ID** | (Optional) ID of schema registry from Confluent Cloud | [List schema-registry ID docs](https://docs.confluent.io/confluent-cli/current/command-reference/schema-registry/schema/confluent_schema-registry_schema_list.html) | 
+The prometheus receiver includes `service.name` and `service.instance.id` resource attributes derived from job name and target configured in `.receivers.prometheus.config.scrape_configs`. As documented [here](https://docs.newrelic.com/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/best-practices/opentelemetry-best-practices-resources/#services), New Relic considers any data with `service.name` as a service despite the fact that not all prometheus data sources are services. As a result, you can find a `confluent` entity under "New Relic -> All Entities -> Services - OpenTelemetry", although the panels will not contain data because the scraped metrics do not represent APM data. diff --git a/other-examples/collector/confluentcloud/collector.yaml b/other-examples/collector/confluentcloud/collector.yaml deleted file mode 100644 index a493cfb9..00000000 --- a/other-examples/collector/confluentcloud/collector.yaml +++ /dev/null @@ -1,37 +0,0 @@ -receivers: - prometheus: - config: - scrape_configs: - - job_name: "confluent" - scrape_interval: 60s # Do not go any lower than this or you'll hit rate limits - static_configs: - - targets: ["api.telemetry.confluent.cloud"] - scheme: https - basic_auth: - username: $CONFLUENT_API_KEY - password: $CONFLUENT_API_SECRET - metrics_path: /v2/metrics/cloud/export - params: - "resource.kafka.id": - - $CLUSTER_ID - # OPTIONAL - You can include monitoring for Confluent connectors or schema registry's by including the ID here. 
- # "resource.connector.id": - # - $CONNECTOR_ID - # "resource.schema_registry.id": - # - $SCHEMA_REGISTRY_ID - -processors: - batch: - -exporters: - otlphttp: - endpoint: $NEW_RELIC_OTLP_ENDPOINT - headers: - api-key: $NEW_RELIC_API_KEY - -service: - pipelines: - metrics: - receivers: [prometheus] - processors: [batch] - exporters: [otlphttp] \ No newline at end of file diff --git a/other-examples/collector/confluentcloud/docker-compose.yaml b/other-examples/collector/confluentcloud/docker-compose.yaml deleted file mode 100644 index 1770a486..00000000 --- a/other-examples/collector/confluentcloud/docker-compose.yaml +++ /dev/null @@ -1,17 +0,0 @@ -version: "3.6" - -services: - - otel-collector: - image: otel/opentelemetry-collector-contrib:0.92.0 - command: --config=/etc/otelcol/config.yaml - volumes: - - ./collector.yaml:/etc/otelcol/config.yaml - environment: - - NEW_RELIC_OTLP_ENDPOINT - - NEW_RELIC_API_KEY - - CONFLUENT_API_KEY - - CONFLUENT_API_SECRET - - CLUSTER_ID - - CONNECTOR_ID - - SCHEMA_REGISTRY_ID diff --git a/other-examples/collector/confluentcloud/k8s/collector.yaml b/other-examples/collector/confluentcloud/k8s/collector.yaml new file mode 100644 index 00000000..1ebe4a4e --- /dev/null +++ b/other-examples/collector/confluentcloud/k8s/collector.yaml @@ -0,0 +1,112 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: nr-confluent +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: collector-config + namespace: nr-confluent + labels: + app.kubernetes.io/name: collector-config +data: + collector-config: | + receivers: + prometheus: + config: + scrape_configs: + - job_name: "confluent" + scrape_interval: 60s # Do not go any lower than this or you'll hit rate limits + static_configs: + - targets: ["api.telemetry.confluent.cloud"] + scheme: https + basic_auth: + username: $CONFLUENT_API_KEY + password: $CONFLUENT_API_SECRET + metrics_path: /v2/metrics/cloud/export + params: + "resource.kafka.id": + - $CONFLUENT_CLUSTER_ID + # OPTIONAL - You can 
include monitoring for Confluent connectors or schema registry's by including the ID here. + #"resource.connector.id": + # - $CONFLUENT_CONNECTOR_ID + #"resource.schema_registry.id": + # - $CONFLUENT_SCHEMA_REGISTRY_ID + + processors: + batch: + + exporters: + otlphttp: + endpoint: ${NEW_RELIC_OTLP_ENDPOINT} + headers: + api-key: ${NEW_RELIC_API_KEY} + + service: + pipelines: + metrics: + receivers: [prometheus] + processors: [batch] + exporters: [otlphttp] +--- +apiVersion: v1 +kind: Pod +metadata: + name: collector + namespace: nr-confluent + labels: + app.kubernetes.io/name: collector +spec: + containers: + - name: collector + image: otel/opentelemetry-collector-contrib:0.98.0 + env: + # The default US endpoint is set here. You can change the endpoint and port based on your requirements if needed. + # docs: https://docs.newrelic.com/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/best-practices/opentelemetry-otlp/#configure-endpoint-port-protocol + - name: NEW_RELIC_OTLP_ENDPOINT + value: https://otlp.nr-data.net/ + # The New Relic API key used to authenticate export requests. + # Defined in secrets.yaml + - name: NEW_RELIC_API_KEY + valueFrom: + secretKeyRef: + name: nr-confluent-secret + key: NEW_RELIC_API_KEY + # The Confluent API key. + # Defined in secrets.yaml + - name: CONFLUENT_API_KEY + valueFrom: + secretKeyRef: + name: nr-confluent-secret + key: CONFLUENT_API_KEY + # The Confluent API secret. + # Defined in secrets.yaml + - name: CONFLUENT_API_SECRET + valueFrom: + secretKeyRef: + name: nr-confluent-secret + key: CONFLUENT_API_SECRET + # Set your Confluent Cluster ID here. 
+ # docs: https://docs.confluent.io/confluent-cli/current/command-reference/kafka/cluster/confluent_kafka_cluster_list.html + - name: CONFLUENT_CLUSTER_ID + value: + # OPTIONAL: Set your Confluent Schema Registry ID here, and uncomment reference in .receivers.prometheus.config.scrape_configs[0].params + # docs: https://docs.confluent.io/confluent-cli/current/command-reference/schema-registry/schema/confluent_schema-registry_schema_list.html + #- name: CONFLUENT_SCHEMA_REGISTRY_ID + # value: + # OPTIONAL: Set your Confluent Connector ID here, and uncomment reference in .receivers.prometheus.config.scrape_configs[0].params + # docs: https://docs.confluent.io/confluent-cli/current/command-reference/connect/cluster/confluent_connect_cluster_list.html + #- name: CONFLUENT_CONNECTOR_ID + # value: + volumeMounts: + - name: collector-config-vol + mountPath: /etc/otelcol-contrib + volumes: + - name: collector-config-vol + configMap: + name: collector-config + items: + - key: collector-config + path: config.yaml diff --git a/other-examples/collector/confluentcloud/k8s/secrets.yaml b/other-examples/collector/confluentcloud/k8s/secrets.yaml new file mode 100644 index 00000000..c2579d0c --- /dev/null +++ b/other-examples/collector/confluentcloud/k8s/secrets.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: nr-confluent-secret + namespace: nr-confluent +stringData: + # New Relic API key to authenticate the export requests. + # docs: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#license-key + NEW_RELIC_API_KEY: + # Set your authentication keys for the Confluent Cloud metrics API. + # docs: https://docs.confluent.io/cloud/current/monitoring/metrics-api.html + CONFLUENT_API_KEY: + CONFLUENT_API_SECRET: