From d5e2739558030831d8c9dd3299229151a03703b2 Mon Sep 17 00:00:00 2001
From: AlexsJones
Date: Wed, 30 Oct 2024 11:24:46 +0000
Subject: [PATCH] chore: improved custom analyzers docs

Signed-off-by: AlexsJones
---
 README.md                                | 55 ++++++++++++++----------
 config/samples/core_v1alpha1_k8sgpt.yaml | 37 ++++++++++------
 2 files changed, 57 insertions(+), 35 deletions(-)

diff --git a/README.md b/README.md
index d177559a..520b8abb 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@ helm install release k8sgpt/k8sgpt-operator -n k8sgpt-operator-system --create-n
 kubectl create secret generic k8sgpt-sample-secret --from-literal=openai-api-key=$OPENAI_TOKEN -n k8sgpt-operator-system
 ```
 
-3. Apply the K8sGPT configuration object:
+3. Apply the K8sGPT configuration object (see [here](https://github.com/k8sgpt-ai/k8sgpt-operator/blob/main/config/samples/core_v1alpha1_k8sgpt.yaml) for the full set of options):
 
 ```sh
 kubectl apply -f - << EOF
@@ -47,30 +47,9 @@ spec:
     secret:
       name: k8sgpt-sample-secret
       key: openai-api-key
-  # backOff:
-  #   enabled: false
-  #   maxRetries: 5
-  # anonymized: false
-  # language: english
-  # proxyEndpoint: https://10.255.30.150 # use proxyEndpoint to setup backend through an HTTP/HTTPS proxy
   noCache: false
   repository: ghcr.io/k8sgpt-ai/k8sgpt
   version: v0.3.41
-  #integrations:
-  #  trivy:
-  #    enabled: true
-  #    namespace: trivy-system
-  # filters:
-  #   - Ingress
-  # sink:
-  #   type: slack
-  #   webhook: # use the sink secret if you want to keep your webhook url private
-  #   secret:
-  #     name: slack-webhook
-  #     key: url
-  #extraOptions:
-  #   backstage:
-  #     enabled: true
 EOF
 ```
 
@@ -383,6 +362,38 @@ Note: ensure that the value of `baseUrl` is a properly constructed [DNS name](ht
 
 
 
 
+## Custom Analyzers
+There may be scenarios where you wish to write your own analyzer in a language of your choice.
+K8sGPT now supports this: write an analyzer that conforms to the schema and serve it for K8sGPT to consume.
+
+K8sGPT-Operator supports [Custom Analyzers](https://github.com/k8sgpt-ai/k8sgpt?tab=readme-ov-file#key-features), which can be set up with the following configuration:
+
+```sh
+kubectl apply -f - << EOF
+apiVersion: core.k8sgpt.ai/v1alpha1
+kind: K8sGPT
+metadata:
+  name: k8sgpt-sample
+  namespace: k8sgpt-operator-system
+spec:
+  ai:
+    enabled: true
+    model: gpt-3.5-turbo
+    backend: openai
+    secret:
+      name: k8sgpt-sample-secret
+      key: openai-api-key
+  noCache: false
+  repository: ghcr.io/k8sgpt-ai/k8sgpt
+  version: v0.3.41
+  customAnalyzers:
+    - name: Foo
+      connection:
+        url: localhost
+        port: 8085
+EOF
+```
+
 
 ## K8sGPT Configuration Options
diff --git a/config/samples/core_v1alpha1_k8sgpt.yaml b/config/samples/core_v1alpha1_k8sgpt.yaml
index 099ca245..3a79b6f5 100644
--- a/config/samples/core_v1alpha1_k8sgpt.yaml
+++ b/config/samples/core_v1alpha1_k8sgpt.yaml
@@ -5,27 +5,38 @@ metadata:
   namespace: k8sgpt-operator-system
 spec:
   ai:
+    enabled: true
     model: gpt-3.5-turbo
     backend: openai
-    enabled: true
     secret:
       name: k8sgpt-sample-secret
       key: openai-api-key
+  backOff:
+    enabled: false
+    maxRetries: 5
+  anonymized: false
+  language: english
+  proxyEndpoint: https://10.255.30.150 # use proxyEndpoint to setup backend through an HTTP/HTTPS proxy
   noCache: false
-  version: v0.3.39
+  repository: ghcr.io/k8sgpt-ai/k8sgpt
+  version: v0.3.41
   customAnalyzers:
     - name: Foo
       connection:
         url: localhost
         port: 8085
-  # remoteCache:
-  #   credentials:
-  #     name: k8sgpt-sample-cache-secret
-  #   s3:
-  #     bucketName: foo
-  #     region: us-west-1
-  # integrations:
-  #   trivy:
-  #     enabled: false
-  #     namespace: trivy-system
-  #     skipInstall: false
+  integrations:
+    trivy:
+      enabled: true
+      namespace: trivy-system
+  filters:
+    - Ingress
+  sink:
+    type: slack
+    webhook: # use the sink secret if you want to keep your webhook url private
+    secret:
+      name: slack-webhook
+      key: url
+  extraOptions:
+    backstage:
+      enabled: true
\ No newline at end of file
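
A note on wiring the `customAnalyzers.connection` settings shown in this patch: `url: localhost` presumably only works when the analyzer runs alongside the deployed k8sgpt workload (for example as a sidecar). In a typical cluster the custom analyzer would instead run as its own Deployment behind a Service, and `connection.url` would point at that Service. The manifest below is a minimal sketch of that setup under those assumptions; the `foo-analyzer` names and the `example.com/foo-analyzer:latest` image are hypothetical placeholders and are not part of this patch.

```yaml
# Hypothetical example: running a custom analyzer as its own workload.
# Names, labels, and the container image are placeholders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: foo-analyzer
  namespace: k8sgpt-operator-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: foo-analyzer
  template:
    metadata:
      labels:
        app: foo-analyzer
    spec:
      containers:
        - name: analyzer
          image: example.com/foo-analyzer:latest # placeholder image that serves the analyzer schema on port 8085
          ports:
            - containerPort: 8085
---
apiVersion: v1
kind: Service
metadata:
  name: foo-analyzer
  namespace: k8sgpt-operator-system
spec:
  selector:
    app: foo-analyzer
  ports:
    - port: 8085
      targetPort: 8085
```

With a setup like this, the `customAnalyzers` entry in the K8sGPT resource would use `url: foo-analyzer` (or the fully qualified `foo-analyzer.k8sgpt-operator-system.svc.cluster.local`) and `port: 8085` instead of `localhost`.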